
Re: [Xen-devel] [PATCH 1/6] x86/mm: add optional cache to GLA->GFN translation



> -----Original Message-----
> From: Jan Beulich [mailto:JBeulich@xxxxxxxx]
> Sent: 19 July 2018 11:46
> To: xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>; Andrew Cooper
> <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant <Paul.Durrant@xxxxxxxxxx>;
> George Dunlap <George.Dunlap@xxxxxxxxxx>; tamas@xxxxxxxxxxxxx; Tim
> (Xen.org) <tim@xxxxxxx>
> Subject: [PATCH 1/6] x86/mm: add optional cache to GLA->GFN translation
> 
> The caching isn't actually implemented here; this patch just sets the
> stage.
> 
> While touching these anyway, also:
> - make their return values gfn_t
> - gva -> gla in their names
> - name their input arguments gla
> 
> At the use sites, convert to/from gfn_t as appropriate.
> 
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
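
For anyone reading the conversions below without the tree handy: the typed-GFN
helpers the call sites now use (gfn_eq, gfn_add, gaddr_to_gfn, gfn_to_gaddr)
boil down to roughly the following. This is a simplified standalone sketch of
what the typesafe wrappers amount to, not the in-tree definitions:

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

typedef uint64_t paddr_t;

/* Wrapping the frame number in a struct stops gfns being mixed with
 * mfns/pfns or raw addresses without an explicit conversion. */
typedef struct { unsigned long g; } gfn_t;

static inline gfn_t _gfn(unsigned long g)              { return (gfn_t){ g }; }
static inline unsigned long gfn_x(gfn_t g)             { return g.g; }
static inline bool gfn_eq(gfn_t a, gfn_t b)            { return gfn_x(a) == gfn_x(b); }
static inline gfn_t gfn_add(gfn_t g, unsigned long i)  { return _gfn(gfn_x(g) + i); }
static inline gfn_t gaddr_to_gfn(paddr_t ga)           { return _gfn(ga >> PAGE_SHIFT); }
static inline paddr_t gfn_to_gaddr(gfn_t g)            { return (paddr_t)gfn_x(g) << PAGE_SHIFT; }

#define INVALID_GFN _gfn(~0UL)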

> 
> --- a/xen/arch/x86/debug.c
> +++ b/xen/arch/x86/debug.c
> @@ -51,7 +51,7 @@ dbg_hvm_va2mfn(dbgva_t vaddr, struct dom
> 
>      DBGP2("vaddr:%lx domid:%d\n", vaddr, dp->domain_id);
> 
> -    *gfn = _gfn(paging_gva_to_gfn(dp->vcpu[0], vaddr, &pfec));
> +    *gfn = paging_gla_to_gfn(dp->vcpu[0], vaddr, &pfec, NULL);
>      if ( gfn_eq(*gfn, INVALID_GFN) )
>      {
>          DBGP2("kdb:bad gfn from gva_to_gfn\n");
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -678,7 +678,8 @@ static int hvmemul_linear_to_phys(
>      struct hvm_emulate_ctxt *hvmemul_ctxt)
>  {
>      struct vcpu *curr = current;
> -    unsigned long pfn, npfn, done, todo, i, offset = addr & ~PAGE_MASK;
> +    gfn_t gfn, ngfn;
> +    unsigned long done, todo, i, offset = addr & ~PAGE_MASK;
>      int reverse;
> 
>      /*
> @@ -700,15 +701,17 @@ static int hvmemul_linear_to_phys(
>      if ( reverse && ((PAGE_SIZE - offset) < bytes_per_rep) )
>      {
>          /* Do page-straddling first iteration forwards via recursion. */
> -        paddr_t _paddr;
> +        paddr_t gaddr;
>          unsigned long one_rep = 1;
>          int rc = hvmemul_linear_to_phys(
> -            addr, &_paddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
> +            addr, &gaddr, bytes_per_rep, &one_rep, pfec, hvmemul_ctxt);
> +
>          if ( rc != X86EMUL_OKAY )
>              return rc;
> -        pfn = _paddr >> PAGE_SHIFT;
> +        gfn = gaddr_to_gfn(gaddr);
>      }
> -    else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == gfn_x(INVALID_GFN) )
> +    else if ( gfn_eq(gfn = paging_gla_to_gfn(curr, addr, &pfec, NULL),
> +                     INVALID_GFN) )
>      {
>          if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
>              return X86EMUL_RETRY;
> @@ -723,11 +726,11 @@ static int hvmemul_linear_to_phys(
>      {
>          /* Get the next PFN in the range. */
>          addr += reverse ? -PAGE_SIZE : PAGE_SIZE;
> -        npfn = paging_gva_to_gfn(curr, addr, &pfec);
> +        ngfn = paging_gla_to_gfn(curr, addr, &pfec, NULL);
> 
>          /* Is it contiguous with the preceding PFNs? If not then we're done. */
> -        if ( (npfn == gfn_x(INVALID_GFN)) ||
> -             (npfn != (pfn + (reverse ? -i : i))) )
> +        if ( gfn_eq(ngfn, INVALID_GFN) ||
> +             !gfn_eq(ngfn, gfn_add(gfn, reverse ? -i : i)) )
>          {
>              if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
>                  return X86EMUL_RETRY;
> @@ -735,7 +738,7 @@ static int hvmemul_linear_to_phys(
>              if ( done == 0 )
>              {
>                  ASSERT(!reverse);
> -                if ( npfn != gfn_x(INVALID_GFN) )
> +                if ( !gfn_eq(ngfn, INVALID_GFN) )
>                      return X86EMUL_UNHANDLEABLE;
>                  *reps = 0;
>                  x86_emul_pagefault(pfec, addr & PAGE_MASK, &hvmemul_ctxt->ctxt);
> @@ -748,7 +751,8 @@ static int hvmemul_linear_to_phys(
>          done += PAGE_SIZE;
>      }
> 
> -    *paddr = ((paddr_t)pfn << PAGE_SHIFT) | offset;
> +    *paddr = gfn_to_gaddr(gfn) | offset;
> +
>      return X86EMUL_OKAY;
>  }
> 
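
One detail I double-checked in the loop above: with i being an unsigned long,
-i is its two's-complement wrap-around, so gfn_add(gfn, reverse ? -i : i)
subtracts i in the reverse case exactly like the old pfn + (reverse ? -i : i)
did. A trivial standalone check with made-up numbers:

#include <assert.h>

int main(void)
{
    unsigned long gfn = 0x1234, i = 3;

    assert(gfn + i == 0x1237);          /* forward stride */
    assert(gfn + (0UL - i) == 0x1231);  /* reverse: modular -i subtracts i */
    return 0;
}
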
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -2684,7 +2684,7 @@ static void *hvm_map_entry(unsigned long
>       * treat it as a kernel-mode read (i.e. no access checks).
>       */
>      pfec = PFEC_page_present;
> -    gfn = paging_gva_to_gfn(current, va, &pfec);
> +    gfn = gfn_x(paging_gla_to_gfn(current, va, &pfec, NULL));
>      if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
>          goto fail;
> 
> @@ -3115,7 +3115,7 @@ enum hvm_translation_result hvm_translat
> 
>      if ( linear )
>      {
> -        gfn = _gfn(paging_gva_to_gfn(v, addr, &pfec));
> +        gfn = paging_gla_to_gfn(v, addr, &pfec, NULL);
> 
>          if ( gfn_eq(gfn, INVALID_GFN) )
>          {
> --- a/xen/arch/x86/hvm/monitor.c
> +++ b/xen/arch/x86/hvm/monitor.c
> @@ -130,7 +130,7 @@ static inline unsigned long gfn_of_rip(u
> 
>      hvm_get_segment_register(curr, x86_seg_cs, &sreg);
> 
> -    return paging_gva_to_gfn(curr, sreg.base + rip, &pfec);
> +    return gfn_x(paging_gla_to_gfn(curr, sreg.base + rip, &pfec, NULL));
>  }
> 
>  int hvm_monitor_debug(unsigned long rip, enum hvm_monitor_debug_type type,
> --- a/xen/arch/x86/mm/guest_walk.c
> +++ b/xen/arch/x86/mm/guest_walk.c
> @@ -81,8 +81,9 @@ static bool set_ad_bits(guest_intpte_t *
>   */
>  bool
>  guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
> -                  unsigned long va, walk_t *gw,
> -                  uint32_t walk, mfn_t top_mfn, void *top_map)
> +                  unsigned long gla, walk_t *gw, uint32_t walk,
> +                  gfn_t top_gfn, mfn_t top_mfn, void *top_map,
> +                  struct hvmemul_cache *cache)
>  {
>      struct domain *d = v->domain;
>      p2m_type_t p2mt;
> @@ -116,7 +117,7 @@ guest_walk_tables(struct vcpu *v, struct
> 
>      perfc_incr(guest_walk);
>      memset(gw, 0, sizeof(*gw));
> -    gw->va = va;
> +    gw->va = gla;
>      gw->pfec = walk & (PFEC_user_mode | PFEC_write_access);
> 
>      /*
> @@ -133,7 +134,7 @@ guest_walk_tables(struct vcpu *v, struct
>      /* Get the l4e from the top level table and check its flags*/
>      gw->l4mfn = top_mfn;
>      l4p = (guest_l4e_t *) top_map;
> -    gw->l4e = l4p[guest_l4_table_offset(va)];
> +    gw->l4e = l4p[guest_l4_table_offset(gla)];
>      gflags = guest_l4e_get_flags(gw->l4e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -163,7 +164,7 @@ guest_walk_tables(struct vcpu *v, struct
>      }
> 
>      /* Get the l3e and check its flags*/
> -    gw->l3e = l3p[guest_l3_table_offset(va)];
> +    gw->l3e = l3p[guest_l3_table_offset(gla)];
>      gflags = guest_l3e_get_flags(gw->l3e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -205,7 +206,7 @@ guest_walk_tables(struct vcpu *v, struct
> 
>          /* Increment the pfn by the right number of 4k pages. */
>          start = _gfn((gfn_x(start) & ~GUEST_L3_GFN_MASK) +
> -                     ((va >> PAGE_SHIFT) & GUEST_L3_GFN_MASK));
> +                     ((gla >> PAGE_SHIFT) & GUEST_L3_GFN_MASK));
>          gw->l1e = guest_l1e_from_gfn(start, flags);
>          gw->l2mfn = gw->l1mfn = INVALID_MFN;
>          leaf_level = 3;
> @@ -215,7 +216,7 @@ guest_walk_tables(struct vcpu *v, struct
>  #else /* PAE only... */
> 
>      /* Get the l3e and check its flag */
> -    gw->l3e = ((guest_l3e_t *) top_map)[guest_l3_table_offset(va)];
> +    gw->l3e = ((guest_l3e_t *)top_map)[guest_l3_table_offset(gla)];
>      gflags = guest_l3e_get_flags(gw->l3e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -242,14 +243,14 @@ guest_walk_tables(struct vcpu *v, struct
>      }
> 
>      /* Get the l2e */
> -    gw->l2e = l2p[guest_l2_table_offset(va)];
> +    gw->l2e = l2p[guest_l2_table_offset(gla)];
> 
>  #else /* 32-bit only... */
> 
>      /* Get l2e from the top level table */
>      gw->l2mfn = top_mfn;
>      l2p = (guest_l2e_t *) top_map;
> -    gw->l2e = l2p[guest_l2_table_offset(va)];
> +    gw->l2e = l2p[guest_l2_table_offset(gla)];
> 
>  #endif /* All levels... */
> 
> @@ -310,7 +311,7 @@ guest_walk_tables(struct vcpu *v, struct
> 
>          /* Increment the pfn by the right number of 4k pages. */
>          start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
> -                     guest_l1_table_offset(va));
> +                     guest_l1_table_offset(gla));
>  #if GUEST_PAGING_LEVELS == 2
>           /* Wider than 32 bits if PSE36 superpage. */
>          gw->el1e = (gfn_x(start) << PAGE_SHIFT) | flags;
> @@ -334,7 +335,7 @@ guest_walk_tables(struct vcpu *v, struct
>          gw->pfec |= rc & PFEC_synth_mask;
>          goto out;
>      }
> -    gw->l1e = l1p[guest_l1_table_offset(va)];
> +    gw->l1e = l1p[guest_l1_table_offset(gla)];
>      gflags = guest_l1e_get_flags(gw->l1e);
>      if ( !(gflags & _PAGE_PRESENT) )
>          goto out;
> @@ -443,22 +444,22 @@ guest_walk_tables(struct vcpu *v, struct
>          break;
> 
>      case 1:
> -        if ( set_ad_bits(&l1p[guest_l1_table_offset(va)].l1, &gw->l1e.l1,
> +        if ( set_ad_bits(&l1p[guest_l1_table_offset(gla)].l1, &gw->l1e.l1,
>                           (walk & PFEC_write_access)) )
>              paging_mark_dirty(d, gw->l1mfn);
>          /* Fallthrough */
>      case 2:
> -        if ( set_ad_bits(&l2p[guest_l2_table_offset(va)].l2, &gw->l2e.l2,
> +        if ( set_ad_bits(&l2p[guest_l2_table_offset(gla)].l2, &gw->l2e.l2,
>                           (walk & PFEC_write_access) && leaf_level == 2) )
>              paging_mark_dirty(d, gw->l2mfn);
>          /* Fallthrough */
>  #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
>      case 3:
> -        if ( set_ad_bits(&l3p[guest_l3_table_offset(va)].l3, &gw->l3e.l3,
> +        if ( set_ad_bits(&l3p[guest_l3_table_offset(gla)].l3, &gw->l3e.l3,
>                           (walk & PFEC_write_access) && leaf_level == 3) )
>              paging_mark_dirty(d, gw->l3mfn);
> 
> -        if ( set_ad_bits(&l4p[guest_l4_table_offset(va)].l4, &gw->l4e.l4,
> +        if ( set_ad_bits(&l4p[guest_l4_table_offset(gla)].l4, &gw->l4e.l4,
>                           false) )
>              paging_mark_dirty(d, gw->l4mfn);
>  #endif
> --- a/xen/arch/x86/mm/hap/guest_walk.c
> +++ b/xen/arch/x86/mm/hap/guest_walk.c
> @@ -26,8 +26,8 @@ asm(".file \"" __OBJECT_FILE__ "\"");
>  #include <xen/sched.h>
>  #include "private.h" /* for hap_gva_to_gfn_* */
> 
> -#define _hap_gva_to_gfn(levels) hap_gva_to_gfn_##levels##_levels
> -#define hap_gva_to_gfn(levels) _hap_gva_to_gfn(levels)
> +#define _hap_gla_to_gfn(levels) hap_gla_to_gfn_##levels##_levels
> +#define hap_gla_to_gfn(levels) _hap_gla_to_gfn(levels)
> 
>  #define _hap_p2m_ga_to_gfn(levels) hap_p2m_ga_to_gfn_##levels##_levels
>  #define hap_p2m_ga_to_gfn(levels) _hap_p2m_ga_to_gfn(levels)
> @@ -39,16 +39,10 @@ asm(".file \"" __OBJECT_FILE__ "\"");
>  #include <asm/guest_pt.h>
>  #include <asm/p2m.h>
> 
> -unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
> -    struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)
> -{
> -    unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
> -    return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec, NULL);
> -}
> -
> -unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
> +static unsigned long ga_to_gfn(
>      struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
> -    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
> +    paddr_t ga, uint32_t *pfec, unsigned int *page_order,
> +    struct hvmemul_cache *cache)
>  {
>      bool walk_ok;
>      mfn_t top_mfn;
> @@ -91,7 +85,8 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
>  #if GUEST_PAGING_LEVELS == 3
>      top_map += (cr3 & ~(PAGE_MASK | 31));
>  #endif
> -    walk_ok = guest_walk_tables(v, p2m, ga, &gw, *pfec, top_mfn, top_map);
> +    walk_ok = guest_walk_tables(v, p2m, ga, &gw, *pfec,
> +                                top_gfn, top_mfn, top_map, cache);
>      unmap_domain_page(top_map);
>      put_page(top_page);
> 
> @@ -137,6 +132,21 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PA
>      return gfn_x(INVALID_GFN);
>  }
> 
> +gfn_t hap_gla_to_gfn(GUEST_PAGING_LEVELS)(
> +    struct vcpu *v, struct p2m_domain *p2m, unsigned long gla, uint32_t *pfec,
> +    struct hvmemul_cache *cache)
> +{
> +    unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
> +
> +    return _gfn(ga_to_gfn(v, p2m, cr3, gla, pfec, NULL, cache));
> +}
> +
> +unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
> +    struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
> +    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
> +{
> +    return ga_to_gfn(v, p2m, cr3, ga, pfec, page_order, NULL);
> +}
> 
>  /*
>   * Local variables:
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -743,10 +743,11 @@ hap_write_p2m_entry(struct domain *d, un
>          p2m_flush_nestedp2m(d);
>  }
> 
> -static unsigned long hap_gva_to_gfn_real_mode(
> -    struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)
> +static gfn_t hap_gla_to_gfn_real_mode(
> +    struct vcpu *v, struct p2m_domain *p2m, unsigned long gla, uint32_t *pfec,
> +    struct hvmemul_cache *cache)
>  {
> -    return ((paddr_t)gva >> PAGE_SHIFT);
> +    return gaddr_to_gfn(gla);
>  }
> 
>  static unsigned long hap_p2m_ga_to_gfn_real_mode(
> @@ -762,7 +763,7 @@ static unsigned long hap_p2m_ga_to_gfn_r
>  static const struct paging_mode hap_paging_real_mode = {
>      .page_fault             = hap_page_fault,
>      .invlpg                 = hap_invlpg,
> -    .gva_to_gfn             = hap_gva_to_gfn_real_mode,
> +    .gla_to_gfn             = hap_gla_to_gfn_real_mode,
>      .p2m_ga_to_gfn          = hap_p2m_ga_to_gfn_real_mode,
>      .update_cr3             = hap_update_cr3,
>      .update_paging_modes    = hap_update_paging_modes,
> @@ -773,7 +774,7 @@ static const struct paging_mode hap_pagi
>  static const struct paging_mode hap_paging_protected_mode = {
>      .page_fault             = hap_page_fault,
>      .invlpg                 = hap_invlpg,
> -    .gva_to_gfn             = hap_gva_to_gfn_2_levels,
> +    .gla_to_gfn             = hap_gla_to_gfn_2_levels,
>      .p2m_ga_to_gfn          = hap_p2m_ga_to_gfn_2_levels,
>      .update_cr3             = hap_update_cr3,
>      .update_paging_modes    = hap_update_paging_modes,
> @@ -784,7 +785,7 @@ static const struct paging_mode hap_pagi
>  static const struct paging_mode hap_paging_pae_mode = {
>      .page_fault             = hap_page_fault,
>      .invlpg                 = hap_invlpg,
> -    .gva_to_gfn             = hap_gva_to_gfn_3_levels,
> +    .gla_to_gfn             = hap_gla_to_gfn_3_levels,
>      .p2m_ga_to_gfn          = hap_p2m_ga_to_gfn_3_levels,
>      .update_cr3             = hap_update_cr3,
>      .update_paging_modes    = hap_update_paging_modes,
> @@ -795,7 +796,7 @@ static const struct paging_mode hap_pagi
>  static const struct paging_mode hap_paging_long_mode = {
>      .page_fault             = hap_page_fault,
>      .invlpg                 = hap_invlpg,
> -    .gva_to_gfn             = hap_gva_to_gfn_4_levels,
> +    .gla_to_gfn             = hap_gla_to_gfn_4_levels,
>      .p2m_ga_to_gfn          = hap_p2m_ga_to_gfn_4_levels,
>      .update_cr3             = hap_update_cr3,
>      .update_paging_modes    = hap_update_paging_modes,
> --- a/xen/arch/x86/mm/hap/private.h
> +++ b/xen/arch/x86/mm/hap/private.h
> @@ -24,18 +24,21 @@
>  /********************************************/
>  /*          GUEST TRANSLATION FUNCS         */
>  /********************************************/
> -unsigned long hap_gva_to_gfn_2_levels(struct vcpu *v,
> -                                     struct p2m_domain *p2m,
> -                                     unsigned long gva,
> -                                     uint32_t *pfec);
> -unsigned long hap_gva_to_gfn_3_levels(struct vcpu *v,
> -                                     struct p2m_domain *p2m,
> -                                     unsigned long gva,
> -                                     uint32_t *pfec);
> -unsigned long hap_gva_to_gfn_4_levels(struct vcpu *v,
> -                                     struct p2m_domain *p2m,
> -                                     unsigned long gva,
> -                                     uint32_t *pfec);
> +gfn_t hap_gla_to_gfn_2_levels(struct vcpu *v,
> +                              struct p2m_domain *p2m,
> +                              unsigned long gla,
> +                              uint32_t *pfec,
> +                              struct hvmemul_cache *cache);
> +gfn_t hap_gla_to_gfn_3_levels(struct vcpu *v,
> +                              struct p2m_domain *p2m,
> +                              unsigned long gla,
> +                              uint32_t *pfec,
> +                              struct hvmemul_cache *cache);
> +gfn_t hap_gla_to_gfn_4_levels(struct vcpu *v,
> +                              struct p2m_domain *p2m,
> +                              unsigned long gla,
> +                              uint32_t *pfec,
> +                              struct hvmemul_cache *cache);
> 
>  unsigned long hap_p2m_ga_to_gfn_2_levels(struct vcpu *v,
>      struct p2m_domain *p2m, unsigned long cr3,
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1968,16 +1968,16 @@ void np2m_schedule(int dir)
>      }
>  }
> 
> -unsigned long paging_gva_to_gfn(struct vcpu *v,
> -                                unsigned long va,
> -                                uint32_t *pfec)
> +gfn_t paging_gla_to_gfn(struct vcpu *v, unsigned long gla, uint32_t *pfec,
> +                        struct hvmemul_cache *cache)
>  {
>      struct p2m_domain *hostp2m = p2m_get_hostp2m(v->domain);
>      const struct paging_mode *hostmode = paging_get_hostmode(v);
> 
>      if ( is_hvm_vcpu(v) && paging_mode_hap(v->domain) && nestedhvm_is_n2(v) )
>      {
> -        unsigned long l2_gfn, l1_gfn;
> +        gfn_t l2_gfn;
> +        unsigned long l1_gfn;
>          struct p2m_domain *p2m;
>          const struct paging_mode *mode;
>          uint8_t l1_p2ma;
> @@ -1987,31 +1987,31 @@ unsigned long paging_gva_to_gfn(struct v
>          /* translate l2 guest va into l2 guest gfn */
>          p2m = p2m_get_nestedp2m(v);
>          mode = paging_get_nestedmode(v);
> -        l2_gfn = mode->gva_to_gfn(v, p2m, va, pfec);
> +        l2_gfn = mode->gla_to_gfn(v, p2m, gla, pfec, cache);
> 
> -        if ( l2_gfn == gfn_x(INVALID_GFN) )
> -            return gfn_x(INVALID_GFN);
> +        if ( gfn_eq(l2_gfn, INVALID_GFN) )
> +            return INVALID_GFN;
> 
>          /* translate l2 guest gfn into l1 guest gfn */
> -        rv = nestedhap_walk_L1_p2m(v, l2_gfn, &l1_gfn, &l1_page_order, &l1_p2ma,
> -                                   1,
> +        rv = nestedhap_walk_L1_p2m(v, gfn_x(l2_gfn), &l1_gfn, &l1_page_order,
> +                                   &l1_p2ma, 1,
>                                     !!(*pfec & PFEC_write_access),
>                                     !!(*pfec & PFEC_insn_fetch));
> 
>          if ( rv != NESTEDHVM_PAGEFAULT_DONE )
> -            return gfn_x(INVALID_GFN);
> +            return INVALID_GFN;
> 
>          /*
>           * Sanity check that l1_gfn can be used properly as a 4K mapping, even
>           * if it mapped by a nested superpage.
>           */
> -        ASSERT((l2_gfn & ((1ul << l1_page_order) - 1)) ==
> +        ASSERT((gfn_x(l2_gfn) & ((1ul << l1_page_order) - 1)) ==
>                 (l1_gfn & ((1ul << l1_page_order) - 1)));
> 
> -        return l1_gfn;
> +        return _gfn(l1_gfn);
>      }
> 
> -    return hostmode->gva_to_gfn(v, hostp2m, va, pfec);
> +    return hostmode->gla_to_gfn(v, hostp2m, gla, pfec, cache);
>  }
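
The ASSERT() in the nested path is easiest to read with numbers: for a 2M
nested mapping (l1_page_order == 9) the low 9 bits of the L2 and L1 gfns must
agree, since both index into the same superpage. A worked example with
made-up values:

#include <assert.h>

int main(void)
{
    unsigned int l1_page_order = 9;                    /* 2M superpage */
    unsigned long l2_gfn = 0x12345, l1_gfn = 0xabd45;  /* low 9 bits: 0x145 */

    assert((l2_gfn & ((1ul << l1_page_order) - 1)) ==
           (l1_gfn & ((1ul << l1_page_order) - 1)));
    return 0;
}
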
> 
>  /*
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -1699,15 +1699,15 @@ static unsigned int shadow_get_allocatio
>  static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr,
>                                  struct sh_emulate_ctxt *sh_ctxt)
>  {
> -    unsigned long gfn;
> +    gfn_t gfn;
>      struct page_info *page;
>      mfn_t mfn;
>      p2m_type_t p2mt;
>      uint32_t pfec = PFEC_page_present | PFEC_write_access;
> 
>      /* Translate the VA to a GFN. */
> -    gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec);
> -    if ( gfn == gfn_x(INVALID_GFN) )
> +    gfn = paging_get_hostmode(v)->gla_to_gfn(v, NULL, vaddr, &pfec, NULL);
> +    if ( gfn_eq(gfn, INVALID_GFN) )
>      {
>          x86_emul_pagefault(pfec, vaddr, &sh_ctxt->ctxt);
> 
> @@ -1717,7 +1717,7 @@ static mfn_t emulate_gva_to_mfn(struct v
>      /* Translate the GFN to an MFN. */
>      ASSERT(!paging_locked_by_me(v->domain));
> 
> -    page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_ALLOC);
> +    page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_ALLOC);
> 
>      /* Sanity checking. */
>      if ( page == NULL )
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -173,17 +173,20 @@ delete_shadow_status(struct domain *d, m
> 
>  static inline bool
>  sh_walk_guest_tables(struct vcpu *v, unsigned long va, walk_t *gw,
> -                     uint32_t pfec)
> +                     uint32_t pfec, struct hvmemul_cache *cache)
>  {
>      return guest_walk_tables(v, p2m_get_hostp2m(v->domain), va, gw, pfec,
> +                             _gfn(paging_mode_external(v->domain)
> +                                  ? cr3_pa(v->arch.hvm_vcpu.guest_cr[3]) >> PAGE_SHIFT
> +                                  : pagetable_get_pfn(v->arch.guest_table)),
>  #if GUEST_PAGING_LEVELS == 3 /* PAE */
>                               INVALID_MFN,
> -                             v->arch.paging.shadow.gl3e
> +                             v->arch.paging.shadow.gl3e,
>  #else /* 32 or 64 */
>                               pagetable_get_mfn(v->arch.guest_table),
> -                             v->arch.paging.shadow.guest_vtable
> +                             v->arch.paging.shadow.guest_vtable,
>  #endif
> -                             );
> +                             cache);
>  }
> 
>  /* This validation is called with lock held, and after write permission
> @@ -3035,7 +3038,7 @@ static int sh_page_fault(struct vcpu *v,
>       * shadow page table. */
>      version = atomic_read(&d->arch.paging.shadow.gtable_dirty_version);
>      smp_rmb();
> -    walk_ok = sh_walk_guest_tables(v, va, &gw, error_code);
> +    walk_ok = sh_walk_guest_tables(v, va, &gw, error_code, NULL);
> 
>  #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
>      regs->error_code &= ~PFEC_page_present;
> @@ -3675,9 +3678,9 @@ static bool sh_invlpg(struct vcpu *v, un
>  }
> 
> 
> -static unsigned long
> -sh_gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
> -    unsigned long va, uint32_t *pfec)
> +static gfn_t
> +sh_gla_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
> +    unsigned long gla, uint32_t *pfec, struct hvmemul_cache *cache)
>  /* Called to translate a guest virtual address to what the *guest*
>   * pagetables would map it to. */
>  {
> @@ -3687,24 +3690,25 @@ sh_gva_to_gfn(struct vcpu *v, struct p2m
> 
>  #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
>      /* Check the vTLB cache first */
> -    unsigned long vtlb_gfn = vtlb_lookup(v, va, *pfec);
> +    unsigned long vtlb_gfn = vtlb_lookup(v, gla, *pfec);
> +
>      if ( vtlb_gfn != gfn_x(INVALID_GFN) )
> -        return vtlb_gfn;
> +        return _gfn(vtlb_gfn);
>  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
> 
> -    if ( !(walk_ok = sh_walk_guest_tables(v, va, &gw, *pfec)) )
> +    if ( !(walk_ok = sh_walk_guest_tables(v, gla, &gw, *pfec, cache)) )
>      {
>          *pfec = gw.pfec;
> -        return gfn_x(INVALID_GFN);
> +        return INVALID_GFN;
>      }
>      gfn = guest_walk_to_gfn(&gw);
> 
>  #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
>      /* Remember this successful VA->GFN translation for later. */
> -    vtlb_insert(v, va >> PAGE_SHIFT, gfn_x(gfn), *pfec);
> +    vtlb_insert(v, gla >> PAGE_SHIFT, gfn_x(gfn), *pfec);
>  #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB) */
> 
> -    return gfn_x(gfn);
> +    return gfn;
>  }
> 
> 
> @@ -4949,7 +4953,7 @@ int sh_audit_l4_table(struct vcpu *v, mf
>  const struct paging_mode sh_paging_mode = {
>      .page_fault                    = sh_page_fault,
>      .invlpg                        = sh_invlpg,
> -    .gva_to_gfn                    = sh_gva_to_gfn,
> +    .gla_to_gfn                    = sh_gla_to_gfn,
>      .update_cr3                    = sh_update_cr3,
>      .update_paging_modes           = shadow_update_paging_modes,
>      .write_p2m_entry               = shadow_write_p2m_entry,
> --- a/xen/arch/x86/mm/shadow/none.c
> +++ b/xen/arch/x86/mm/shadow/none.c
> @@ -43,11 +43,12 @@ static bool _invlpg(struct vcpu *v, unsi
>      return true;
>  }
> 
> -static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
> -                                 unsigned long va, uint32_t *pfec)
> +static gfn_t _gla_to_gfn(struct vcpu *v, struct p2m_domain *p2m,
> +                         unsigned long gla, uint32_t *pfec,
> +                         struct hvmemul_cache *cache)
>  {
>      ASSERT_UNREACHABLE();
> -    return gfn_x(INVALID_GFN);
> +    return INVALID_GFN;
>  }
> 
>  static void _update_cr3(struct vcpu *v, int do_locking, bool noflush)
> @@ -70,7 +71,7 @@ static void _write_p2m_entry(struct doma
>  static const struct paging_mode sh_paging_none = {
>      .page_fault                    = _page_fault,
>      .invlpg                        = _invlpg,
> -    .gva_to_gfn                    = _gva_to_gfn,
> +    .gla_to_gfn                    = _gla_to_gfn,
>      .update_cr3                    = _update_cr3,
>      .update_paging_modes           = _update_paging_modes,
>      .write_p2m_entry               = _write_p2m_entry,
> --- a/xen/include/asm-x86/guest_pt.h
> +++ b/xen/include/asm-x86/guest_pt.h
> @@ -425,7 +425,8 @@ static inline unsigned int guest_walk_to
> 
>  bool
>  guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m, unsigned long va,
> -                  walk_t *gw, uint32_t pfec, mfn_t top_mfn, void *top_map);
> +                  walk_t *gw, uint32_t pfec, gfn_t top_gfn, mfn_t top_mfn,
> +                  void *top_map, struct hvmemul_cache *cache);
> 
>  /* Pretty-print the contents of a guest-walk */
>  static inline void print_gw(const walk_t *gw)
> --- a/xen/include/asm-x86/hvm/vcpu.h
> +++ b/xen/include/asm-x86/hvm/vcpu.h
> @@ -53,6 +53,8 @@ struct hvm_mmio_cache {
>      uint8_t buffer[32];
>  };
> 
> +struct hvmemul_cache;
> +
>  struct hvm_vcpu_io {
>      /* I/O request in flight to device model. */
>      enum hvm_io_completion io_completion;
> --- a/xen/include/asm-x86/paging.h
> +++ b/xen/include/asm-x86/paging.h
> @@ -107,10 +107,11 @@ struct paging_mode {
>      int           (*page_fault            )(struct vcpu *v, unsigned long va,
>                                              struct cpu_user_regs *regs);
>      bool          (*invlpg                )(struct vcpu *v, unsigned long va);
> -    unsigned long (*gva_to_gfn            )(struct vcpu *v,
> +    gfn_t         (*gla_to_gfn            )(struct vcpu *v,
>                                              struct p2m_domain *p2m,
> -                                            unsigned long va,
> -                                            uint32_t *pfec);
> +                                            unsigned long gla,
> +                                            uint32_t *pfec,
> +                                            struct hvmemul_cache *cache);
>      unsigned long (*p2m_ga_to_gfn         )(struct vcpu *v,
>                                              struct p2m_domain *p2m,
>                                              unsigned long cr3,
> @@ -246,9 +247,10 @@ void paging_invlpg(struct vcpu *v, unsig
>   * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
>   * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
>   */
> -unsigned long paging_gva_to_gfn(struct vcpu *v,
> -                                unsigned long va,
> -                                uint32_t *pfec);
> +gfn_t paging_gla_to_gfn(struct vcpu *v,
> +                        unsigned long va,
> +                        uint32_t *pfec,
> +                        struct hvmemul_cache *cache);
> 
>  /* Translate a guest address using a particular CR3 value.  This is used
>   * to by nested HAP code, to walk the guest-supplied NPT tables as if
> 
> 
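
For reference, after this patch a typical caller follows the pattern of the
converted sites above: translate, compare against INVALID_GFN, and pass NULL
for the cache until a later patch in the series actually provides one. A rough
fragment (hypothetical helper, names like gla/gpa are just placeholders, not
lifted verbatim from the tree):

static int translate_for_emulation(unsigned long gla, paddr_t *gpa)
{
    uint32_t pfec = PFEC_page_present;
    gfn_t gfn = paging_gla_to_gfn(current, gla, &pfec, NULL /* no cache yet */);

    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
            return X86EMUL_RETRY;       /* paged-out or shared page */
        return X86EMUL_UNHANDLEABLE;    /* or inject #PF using pfec */
    }

    *gpa = gfn_to_gaddr(gfn) | (gla & ~PAGE_MASK);
    return X86EMUL_OKAY;
}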


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
