Re: [PATCH for-4.19? v4 4/6] x86: Make the maximum number of altp2m views configurable



On Sat, May 18, 2024 at 11:02:15AM +0000, Petr Beneš wrote:
> From: Petr Beneš <w1benny@xxxxxxxxx>
> 
> This commit introduces the ability to configure the maximum number of altp2m
> views for the domain during its creation. Previously, the limits were
> hardcoded to a maximum of 10. This change allows for greater flexibility in
> environments that require more or fewer altp2m views.
> 
> The maximum configurable limit for max_altp2m on x86 is now set to MAX_EPTP
> (512). This cap is linked to the architectural limit of the EPTP-switching
> VMFUNC, which supports up to 512 entries. Despite there being no inherent need
> for limiting max_altp2m in scenarios not utilizing VMFUNC, decoupling these
> components would necessitate substantial code changes.
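For reference, the 512 figure follows directly from the MAX_EPTP definition
used below (the EPTP list consumed by the EPTP-switching VMFUNC is a single
page of 8-byte entries):

    /* asm/domain.h, x86 with 4 KiB pages: */
    #define MAX_EPTP        (PAGE_SIZE / sizeof(uint64_t))   /* 4096 / 8 == 512 */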
> 
> Signed-off-by: Petr Beneš <w1benny@xxxxxxxxx>
> ---
>  xen/arch/x86/domain.c             | 12 ++++
>  xen/arch/x86/hvm/hvm.c            |  8 ++-
>  xen/arch/x86/hvm/vmx/vmx.c        |  2 +-
>  xen/arch/x86/include/asm/domain.h |  7 +--
>  xen/arch/x86/include/asm/p2m.h    |  6 +-
>  xen/arch/x86/mm/altp2m.c          | 91 +++++++++++++++++++------------
>  xen/arch/x86/mm/hap/hap.c         |  6 +-
>  xen/arch/x86/mm/mem_access.c      | 24 ++++----
>  xen/arch/x86/mm/mem_sharing.c     |  2 +-
>  xen/arch/x86/mm/p2m-ept.c         | 12 ++--
>  xen/arch/x86/mm/p2m.c             |  8 +--
>  xen/common/domain.c               |  1 +
>  xen/include/public/domctl.h       |  5 +-
>  xen/include/xen/sched.h           |  2 +
>  14 files changed, 116 insertions(+), 70 deletions(-)
> 
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 00a3aaa576..3bd18cb2d0 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -685,6 +685,18 @@ int arch_sanitise_domain_config(struct xen_domctl_createdomain *config)
>          return -EINVAL;
>      }

You also need to adjust arch_sanitise_domain_config() in ARM to return
an error if nr_altp2m is set, as there's no support for altp2m on ARM
yet.
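
Something minimal along these lines should be enough (a sketch only; exact
placement and wording are of course up to you):

    /* Arm has no altp2m support, so reject any request for altp2m views. */
    if ( config->nr_altp2m )
    {
        dprintk(XENLOG_INFO, "altp2m is not supported on Arm\n");
        return -EINVAL;
    }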

> 
> +    if ( config->nr_altp2m && !hvm_altp2m_supported() )
> +    {
> +        dprintk(XENLOG_INFO, "altp2m requested but not available\n");
> +        return -EINVAL;
> +    }
> +
> +    if ( config->nr_altp2m > MAX_EPTP )
> +    {
> +        dprintk(XENLOG_INFO, "nr_altp2m must be <= %lu\n", MAX_EPTP);
> +        return -EINVAL;
> +    }
> +
>      if ( config->vmtrace_size )
>      {
>          unsigned int size = config->vmtrace_size;
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 9594e0a5c5..77e4016bdb 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -4639,6 +4639,12 @@ static int do_altp2m_op(
>          goto out;
>      }
> 
> +    if ( d->nr_altp2m == 0 )

I would merge this with the previous check, which also returns
-EINVAL.
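
I.e. something like the following, assuming the preceding -EINVAL path is the
altp2m support check (sketch only):

    if ( !hvm_altp2m_supported() || d->nr_altp2m == 0 )
    {
        rc = -EINVAL;
        goto out;
    }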

> +    {
> +        rc = -EINVAL;
> +        goto out;
> +    }
> +
>      if ( (rc = xsm_hvm_altp2mhvm_op(XSM_OTHER, d, mode, a.cmd)) )
>          goto out;
> 
> @@ -5228,7 +5234,7 @@ void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx)
>      if ( !hvm_is_singlestep_supported() )
>          return;
> 
> -    if ( p2midx >= MAX_ALTP2M )
> +    if ( p2midx >= v->domain->nr_altp2m )
>          return;
> 
>      v->arch.hvm.single_step = true;
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 5f67a48592..76ee09b701 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -4888,7 +4888,7 @@ bool asmlinkage vmx_vmenter_helper(const struct cpu_user_regs *regs)
>          {
>              unsigned int i;
> 
> -            for ( i = 0; i < MAX_ALTP2M; ++i )
> +            for ( i = 0; i < currd->nr_altp2m; ++i )
>              {
>                  if ( currd->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
>                      continue;
> diff --git a/xen/arch/x86/include/asm/domain.h b/xen/arch/x86/include/asm/domain.h
> index f5daeb182b..3935328781 100644
> --- a/xen/arch/x86/include/asm/domain.h
> +++ b/xen/arch/x86/include/asm/domain.h
> @@ -258,11 +258,10 @@ struct paging_vcpu {
>      struct shadow_vcpu shadow;
>  };
> 
> -#define MAX_NESTEDP2M 10
> -
> -#define MAX_ALTP2M      10 /* arbitrary */
>  #define INVALID_ALTP2M  0xffff
>  #define MAX_EPTP        (PAGE_SIZE / sizeof(uint64_t))
> +#define MAX_NESTEDP2M   10
> +
>  struct p2m_domain;
>  struct time_scale {
>      int shift;
> @@ -353,7 +352,7 @@ struct arch_domain
> 
>      /* altp2m: allow multiple copies of host p2m */
>      bool altp2m_active;
> -    struct p2m_domain *altp2m_p2m[MAX_ALTP2M];
> +    struct p2m_domain **altp2m_p2m;
>      mm_lock_t altp2m_list_lock;
>      uint64_t *altp2m_eptp;
>      uint64_t *altp2m_visible_eptp;
> diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
> index 111badf89a..e66c081149 100644
> --- a/xen/arch/x86/include/asm/p2m.h
> +++ b/xen/arch/x86/include/asm/p2m.h
> @@ -881,7 +881,7 @@ static inline struct p2m_domain *p2m_get_altp2m(struct vcpu *v)
>      if ( index == INVALID_ALTP2M )
>          return NULL;
> 
> -    BUG_ON(index >= MAX_ALTP2M);
> +    BUG_ON(index >= v->domain->nr_altp2m);
> 
>      return v->domain->arch.altp2m_p2m[index];
>  }
> @@ -891,7 +891,7 @@ static inline bool p2m_set_altp2m(struct vcpu *v, unsigned int idx)
>  {
>      struct p2m_domain *orig;
> 
> -    BUG_ON(idx >= MAX_ALTP2M);
> +    BUG_ON(idx >= v->domain->nr_altp2m);
> 
>      if ( idx == vcpu_altp2m(v).p2midx )
>          return false;
> @@ -943,7 +943,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>                                  p2m_type_t p2mt, p2m_access_t p2ma);
> 
>  /* Set a specific p2m view visibility */
> -int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
> +int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int idx,
>                                     uint8_t visible);
>  #else /* !CONFIG_HVM */
>  struct p2m_domain *p2m_get_altp2m(struct vcpu *v);
> diff --git a/xen/arch/x86/mm/altp2m.c b/xen/arch/x86/mm/altp2m.c
> index 6fe1e9ed6b..5cb71c8b8e 100644
> --- a/xen/arch/x86/mm/altp2m.c
> +++ b/xen/arch/x86/mm/altp2m.c
> @@ -15,6 +15,11 @@
>  void
>  altp2m_vcpu_initialise(struct vcpu *v)
>  {
> +    struct domain *d = v->domain;
> +
> +    if ( d->nr_altp2m == 0 )
> +        return;
> +
>      if ( v != current )
>          vcpu_pause(v);
> 
> @@ -30,8 +35,12 @@ altp2m_vcpu_initialise(struct vcpu *v)
>  void
>  altp2m_vcpu_destroy(struct vcpu *v)
>  {
> +    struct domain *d = v->domain;
>      struct p2m_domain *p2m;
> 
> +    if ( d->nr_altp2m == 0 )
> +        return;
> +
>      if ( v != current )
>          vcpu_pause(v);
> 
> @@ -122,7 +131,12 @@ int p2m_init_altp2m(struct domain *d)
>      struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
> 
>      mm_lock_init(&d->arch.altp2m_list_lock);
> -    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    d->arch.altp2m_p2m = xzalloc_array(struct p2m_domain *, d->nr_altp2m);
> +
> +    if ( !d->arch.altp2m_p2m )
> +        return -ENOMEM;
> +
> +    for ( i = 0; i < d->nr_altp2m; i++ )
>      {
>          d->arch.altp2m_p2m[i] = p2m = p2m_init_one(d);
>          if ( p2m == NULL )
> @@ -143,7 +157,10 @@ void p2m_teardown_altp2m(struct domain *d)
>      unsigned int i;
>      struct p2m_domain *p2m;
> 
> -    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    if ( !d->arch.altp2m_p2m )
> +        return;
> +
> +    for ( i = 0; i < d->nr_altp2m; i++ )
>      {
>          if ( !d->arch.altp2m_p2m[i] )
>              continue;
> @@ -151,6 +168,8 @@ void p2m_teardown_altp2m(struct domain *d)
>          d->arch.altp2m_p2m[i] = NULL;
>          p2m_free_one(p2m);
>      }
> +
> +    XFREE(d->arch.altp2m_p2m);
>  }
> 
>  int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
> @@ -200,7 +219,7 @@ bool p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, unsigned int idx)
>      struct domain *d = v->domain;
>      bool rc = false;
> 
> -    if ( idx >= MAX_ALTP2M )
> +    if ( idx >= d->nr_altp2m )
>          return rc;
> 
>      altp2m_list_lock(d);
> @@ -306,8 +325,8 @@ static void p2m_reset_altp2m(struct domain *d, unsigned int idx,
>  {
>      struct p2m_domain *p2m;
> 
> -    ASSERT(idx < MAX_ALTP2M);
> -    p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
> +    ASSERT(idx < d->nr_altp2m);
> +    p2m = d->arch.altp2m_p2m[idx];

If the array_access_nospec() is not required, removing it should be done in
a separate commit.  Also, the commit message doesn't mention why dropping
the nospec hardening is safe.
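
If the hardening is to be kept while switching to the dynamic bound, this
would simply become the same form the patch already uses further down:

    p2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];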

> 
>      p2m_lock(p2m);
> 
> @@ -332,7 +351,7 @@ void p2m_flush_altp2m(struct domain *d)
> 
>      altp2m_list_lock(d);
> 
> -    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    for ( i = 0; i < d->nr_altp2m; i++ )
>      {
>          p2m_reset_altp2m(d, i, ALTP2M_DEACTIVATE);
>          d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);
> @@ -348,9 +367,9 @@ static int p2m_activate_altp2m(struct domain *d, unsigned int idx,
>      struct p2m_domain *hostp2m, *p2m;
>      int rc;
> 
> -    ASSERT(idx < MAX_ALTP2M);
> +    ASSERT(idx < d->nr_altp2m);
> 
> -    p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
> +    p2m = d->arch.altp2m_p2m[idx];

Same here.

>      hostp2m = p2m_get_hostp2m(d);
> 
>      p2m_lock(p2m);
> @@ -388,12 +407,12 @@ int p2m_init_altp2m_by_id(struct domain *d, unsigned int idx)
>      int rc = -EINVAL;
>      struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
> 
> -    if ( idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
> +    if ( idx >= d->nr_altp2m )
>          return rc;
> 
>      altp2m_list_lock(d);
> 
> -    if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
> +    if ( d->arch.altp2m_eptp[array_index_nospec(idx, d->nr_altp2m)] ==
>           mfn_x(INVALID_MFN) )
>          rc = p2m_activate_altp2m(d, idx, hostp2m->default_access);
> 
> @@ -415,7 +434,7 @@ int p2m_init_next_altp2m(struct domain *d, uint16_t *idx,
> 
>      altp2m_list_lock(d);
> 
> -    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    for ( i = 0; i < d->nr_altp2m; i++ )
>      {
>          if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
>              continue;
> @@ -437,7 +456,7 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
>      struct p2m_domain *p2m;
>      int rc = -EBUSY;
> 
> -    if ( !idx || idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) )
> +    if ( !idx || idx >= d->nr_altp2m )
>          return rc;
> 
>      rc = domain_pause_except_self(d);
> @@ -447,17 +466,17 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx)
>      rc = -EBUSY;
>      altp2m_list_lock(d);
> 
> -    if ( d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] !=
> +    if ( d->arch.altp2m_eptp[array_index_nospec(idx, d->nr_altp2m)] !=
>           mfn_x(INVALID_MFN) )
>      {
> -        p2m = array_access_nospec(d->arch.altp2m_p2m, idx);
> +        p2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
> 
>          if ( !_atomic_read(p2m->active_vcpus) )
>          {
>              p2m_reset_altp2m(d, idx, ALTP2M_DEACTIVATE);
> -            d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] =
> +            d->arch.altp2m_eptp[array_index_nospec(idx, d->nr_altp2m)] =
>                  mfn_x(INVALID_MFN);
> -            d->arch.altp2m_visible_eptp[array_index_nospec(idx, MAX_EPTP)] =
> +            d->arch.altp2m_visible_eptp[array_index_nospec(idx, d->nr_altp2m)] =
>                  mfn_x(INVALID_MFN);
>              rc = 0;
>          }
> @@ -475,7 +494,7 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
>      struct vcpu *v;
>      int rc = -EINVAL;
> 
> -    if ( idx >= MAX_ALTP2M )
> +    if ( idx >= d->nr_altp2m )
>          return rc;
> 
>      rc = domain_pause_except_self(d);
> @@ -510,13 +529,13 @@ int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
>      mfn_t mfn;
>      int rc = -EINVAL;
> 
> -    if ( idx >=  min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -         d->arch.altp2m_eptp[array_index_nospec(idx, MAX_EPTP)] ==
> +    if ( idx >= d->nr_altp2m ||
> +         d->arch.altp2m_eptp[array_index_nospec(idx, d->nr_altp2m)] ==
>           mfn_x(INVALID_MFN) )
>          return rc;
> 
>      hp2m = p2m_get_hostp2m(d);
> -    ap2m = array_access_nospec(d->arch.altp2m_p2m, idx);
> +    ap2m = d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
> 
>      p2m_lock(hp2m);
>      p2m_lock(ap2m);
> @@ -572,7 +591,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
> 
>      altp2m_list_lock(d);
> 
> -    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    for ( i = 0; i < d->nr_altp2m; i++ )
>      {
>          p2m_type_t t;
>          p2m_access_t a;
> @@ -595,7 +614,7 @@ int p2m_altp2m_propagate_change(struct domain *d, gfn_t gfn,
>              else
>              {
>                  /* At least 2 altp2m's impacted, so reset everything */
> -                for ( i = 0; i < MAX_ALTP2M; i++ )
> +                for ( i = 0; i < d->nr_altp2m; i++ )
>                  {
>                      if ( i == last_reset_idx ||
>                           d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
> @@ -659,12 +678,13 @@ int p2m_set_suppress_ve_multi(struct domain *d,
> 
>      if ( sve->view > 0 )
>      {
> -        if ( sve->view >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -             d->arch.altp2m_eptp[array_index_nospec(sve->view, MAX_EPTP)] ==
> +        if ( sve->view >= d->nr_altp2m ||
> +             d->arch.altp2m_eptp[array_index_nospec(sve->view, d->nr_altp2m)] ==
>               mfn_x(INVALID_MFN) )
>              return -EINVAL;
> 
> -        p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, sve->view);
> +        p2m = ap2m =
> +            d->arch.altp2m_p2m[array_index_nospec(sve->view, d->nr_altp2m)];

This expression is so common that it might be helpful to introduce a
helper: altp2m_get_p2m() or similar that encapsulates it (in a
separate patch).
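
A minimal sketch of such a helper (name and exact placement are only a
suggestion):

    static inline struct p2m_domain *altp2m_get_p2m(const struct domain *d,
                                                    unsigned int idx)
    {
        ASSERT(idx < d->nr_altp2m);
        return d->arch.altp2m_p2m[array_index_nospec(idx, d->nr_altp2m)];
    }

Call sites like the one above would then become
p2m = ap2m = altp2m_get_p2m(d, sve->view);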

>      }
> 
>      p2m_lock(host_p2m);
> @@ -727,12 +747,13 @@ int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
> 
>      if ( altp2m_idx > 0 )
>      {
> -        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
> +        if ( altp2m_idx >= d->nr_altp2m ||
> +             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, d->nr_altp2m)] ==
>               mfn_x(INVALID_MFN) )
>              return -EINVAL;
> 
> -        p2m = ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
> +        p2m = ap2m =
> +            d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
>      }
>      else
>          p2m = host_p2m;
> @@ -754,7 +775,7 @@ int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,
>      return rc;
>  }
> 
> -int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
> +int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int idx,
>                                     uint8_t visible)
>  {
>      int rc = 0;
> @@ -763,17 +784,17 @@ int p2m_set_altp2m_view_visibility(struct domain *d, unsigned int altp2m_idx,
> 
>      /*
>       * Eptp index is correlated with altp2m index and should not exceed
> -     * min(MAX_ALTP2M, MAX_EPTP).
> +     * d->nr_altp2m.
>       */
> -    if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -         d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
> +    if ( idx >= d->nr_altp2m ||
> +         d->arch.altp2m_eptp[array_index_nospec(idx, d->nr_altp2m)] ==
>           mfn_x(INVALID_MFN) )
>          rc = -EINVAL;
>      else if ( visible )
> -        d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
> -            d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)];
> +        d->arch.altp2m_visible_eptp[array_index_nospec(idx, d->nr_altp2m)] =
> +            d->arch.altp2m_eptp[array_index_nospec(idx, d->nr_altp2m)];
>      else
> -        d->arch.altp2m_visible_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] =
> +        d->arch.altp2m_visible_eptp[array_index_nospec(idx, d->nr_altp2m)] =
>              mfn_x(INVALID_MFN);
> 
>      altp2m_list_unlock(d);
> diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
> index d2011fde24..501fd9848b 100644
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -515,7 +515,7 @@ int hap_enable(struct domain *d, u32 mode)
>              d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);
>          }
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              rv = p2m_alloc_table(d->arch.altp2m_p2m[i]);
>              if ( rv != 0 )
> @@ -538,7 +538,7 @@ void hap_final_teardown(struct domain *d)
>      unsigned int i;
> 
>      if ( hvm_altp2m_supported() )
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>              p2m_teardown(d->arch.altp2m_p2m[i], true, NULL);
> 
>      /* Destroy nestedp2m's first */
> @@ -590,7 +590,7 @@ void hap_teardown(struct domain *d, bool *preempted)
>          FREE_XENHEAP_PAGE(d->arch.altp2m_eptp);
>          FREE_XENHEAP_PAGE(d->arch.altp2m_visible_eptp);
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              p2m_teardown(d->arch.altp2m_p2m[i], false, preempted);
>              if ( preempted && *preempted )
> diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
> index 60a0cce68a..63bb2e10ed 100644
> --- a/xen/arch/x86/mm/mem_access.c
> +++ b/xen/arch/x86/mm/mem_access.c
> @@ -347,12 +347,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
>      /* altp2m view 0 is treated as the hostp2m */
>      if ( altp2m_idx )
>      {
> -        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
> +        if ( altp2m_idx >= d->nr_altp2m ||
> +             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, d->nr_altp2m)] ==
>               mfn_x(INVALID_MFN) )
>              return -EINVAL;
> 
> -        ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
> +        ap2m = d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
>      }
> 
>      if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
> @@ -403,12 +403,12 @@ long p2m_set_mem_access_multi(struct domain *d,
>      /* altp2m view 0 is treated as the hostp2m */
>      if ( altp2m_idx )
>      {
> -        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
> -             mfn_x(INVALID_MFN) )
> +        if ( altp2m_idx >= d->nr_altp2m ||
> +             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, d->nr_altp2m)]
> +             == mfn_x(INVALID_MFN) )
>              return -EINVAL;
> 
> -        ap2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
> +        ap2m = d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
>      }
> 
>      p2m_lock(p2m);
> @@ -466,12 +466,12 @@ int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,
>      }
>      else if ( altp2m_idx ) /* altp2m view 0 is treated as the hostp2m */
>      {
> -        if ( altp2m_idx >= min(ARRAY_SIZE(d->arch.altp2m_p2m), MAX_EPTP) ||
> -             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, MAX_EPTP)] ==
> -             mfn_x(INVALID_MFN) )
> +        if ( altp2m_idx >= d->nr_altp2m ||
> +             d->arch.altp2m_eptp[array_index_nospec(altp2m_idx, d->nr_altp2m)]
> +             == mfn_x(INVALID_MFN) )
>              return -EINVAL;
> 
> -        p2m = array_access_nospec(d->arch.altp2m_p2m, altp2m_idx);
> +        p2m = d->arch.altp2m_p2m[array_index_nospec(altp2m_idx, d->nr_altp2m)];
>      }
> 
>      return _p2m_get_mem_access(p2m, gfn, access);
> @@ -486,7 +486,7 @@ void arch_p2m_set_access_required(struct domain *d, bool access_required)
>      if ( altp2m_active(d) )
>      {
>          unsigned int i;
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              struct p2m_domain *p2m = d->arch.altp2m_p2m[i];
> 
> diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
> index da28266ef0..83bb9dd5df 100644
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -912,7 +912,7 @@ static int nominate_page(struct domain *d, gfn_t gfn,
> 
>          altp2m_list_lock(d);
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              ap2m = d->arch.altp2m_p2m[i];
>              if ( !ap2m )
> diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
> index f83610cb8c..42b868ca45 100644
> --- a/xen/arch/x86/mm/p2m-ept.c
> +++ b/xen/arch/x86/mm/p2m-ept.c
> @@ -1293,7 +1293,7 @@ static void ept_set_ad_sync(struct domain *d, bool value)
>      {
>          unsigned int i;
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              struct p2m_domain *p2m;
> 
> @@ -1500,15 +1500,17 @@ void setup_ept_dump(void)
> 
>  void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
>  {
> -    struct p2m_domain *p2m = array_access_nospec(d->arch.altp2m_p2m, i);
> +    struct p2m_domain *p2m =
> +        d->arch.altp2m_p2m[array_index_nospec(i, d->nr_altp2m)];
>      struct p2m_domain *hostp2m = p2m_get_hostp2m(d);
>      struct ept_data *ept;
> 
>      p2m->ept.ad = hostp2m->ept.ad;
>      ept = &p2m->ept;
>      ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
> -    d->arch.altp2m_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
> -    d->arch.altp2m_visible_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
> +    d->arch.altp2m_eptp[array_index_nospec(i, d->nr_altp2m)] = ept->eptp;
> +    d->arch.altp2m_visible_eptp[array_index_nospec(i, d->nr_altp2m)] =
> +        ept->eptp;
>  }
> 
>  unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
> @@ -1519,7 +1521,7 @@ unsigned int p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp)
> 
>      altp2m_list_lock(d);
> 
> -    for ( i = 0; i < MAX_ALTP2M; i++ )
> +    for ( i = 0; i < d->nr_altp2m; i++ )
>      {
>          if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )
>              continue;
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index db5d9b6c2a..549aec8d6b 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -105,7 +105,7 @@ void p2m_change_entry_type_global(struct domain *d,
>      {
>          unsigned int i;
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
>              {
> @@ -140,7 +140,7 @@ void p2m_memory_type_changed(struct domain *d)
>      {
>          unsigned int i;
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
>              {
> @@ -913,7 +913,7 @@ void p2m_change_type_range(struct domain *d,
>      {
>          unsigned int i;
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
>              {
> @@ -986,7 +986,7 @@ int p2m_finish_type_change(struct domain *d,
>      {
>          unsigned int i;
> 
> -        for ( i = 0; i < MAX_ALTP2M; i++ )
> +        for ( i = 0; i < d->nr_altp2m; i++ )
>          {
>              if ( d->arch.altp2m_eptp[i] != mfn_x(INVALID_MFN) )
>              {
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 6773f7fb90..a10a70e9d4 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -610,6 +610,7 @@ struct domain *domain_create(domid_t domid,
>      if ( config )
>      {
>          d->options = config->flags;
> +        d->nr_altp2m = config->nr_altp2m;
>          d->vmtrace_size = config->vmtrace_size;
>      }
> 
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index a33f9ec32b..60a871f123 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -21,7 +21,7 @@
>  #include "hvm/save.h"
>  #include "memory.h"
> 
> -#define XEN_DOMCTL_INTERFACE_VERSION 0x00000016
> +#define XEN_DOMCTL_INTERFACE_VERSION 0x00000017
> 
>  /*
>   * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
> @@ -86,6 +86,9 @@ struct xen_domctl_createdomain {
> 
>      uint32_t grant_opts;
> 
> +    /* Number of altp2ms to allocate. */
> +    uint32_t nr_altp2m;

I've got a colliding patch that introduces altp2m_opts and uses the bottom 2
bits to signal the altp2m mode.  On EPT the number of views is limited to
512, so using 16 bits (from the high part of altp2m_opts) for the count
should be enough, hopefully also for other architectures.
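
A sketch of the packing being described, purely illustrative (none of these
names are from either patch):

    uint32_t altp2m_opts;
    /* Bits 0-1: altp2m mode. */
    #define XEN_DOMCTL_ALTP2M_mode_MASK   0x00000003U
    /* Bits 16-31: number of altp2m views to allocate. */
    #define XEN_DOMCTL_ALTP2M_nr_SHIFT    16
    #define XEN_DOMCTL_ALTP2M_nr_MASK     0xffff0000U

The view count would then be recovered as
(altp2m_opts & XEN_DOMCTL_ALTP2M_nr_MASK) >> XEN_DOMCTL_ALTP2M_nr_SHIFT,
leaving 16 bits for the count, well above the 512-entry EPT limit.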

> +
>      /* Per-vCPU buffer size in bytes.  0 to disable. */
>      uint32_t vmtrace_size;
> 
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 132b841995..18cc0748a1 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -602,6 +602,8 @@ struct domain
>          unsigned int guest_request_sync          : 1;
>      } monitor;
> 
> +    unsigned int nr_altp2m;    /* Number of altp2m tables */

Shouldn't this new field be placed in arch_domain with the rest of the
altp2m fields?  I know the number of altp2m views is an option suitable for
sharing between all architectures that have altp2m implementations, but IMO
it's better grouped with the rest of the altp2m fields for consistency.
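
For illustration, a trimmed mock of the grouping being suggested (only the
altp2m-related members of the x86 struct arch_domain are shown, field names
as in this patch; the real structure obviously has many more members):

    struct arch_domain {
        /* ... */
        /* altp2m: allow multiple copies of host p2m */
        bool altp2m_active;
        unsigned int nr_altp2m;        /* Number of altp2m tables */
        struct p2m_domain **altp2m_p2m;
        mm_lock_t altp2m_list_lock;
        uint64_t *altp2m_eptp;
        uint64_t *altp2m_visible_eptp;
        /* ... */
    };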

Thanks, Roger.
