
Re: [Xen-devel] [PATCH v2 2/2] sysctl/libxl: choose a sane default for HAP



> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@xxxxxxxxxxxxxxxxxxxx> On Behalf Of Paul 
> Durrant
> Sent: 05 September 2019 14:52
> To: Roger Pau Monne <roger.pau@xxxxxxxxxx>; xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Stefano Stabellini <sstabellini@xxxxxxxxxx>; Wei Liu <wl@xxxxxxx>; Konrad 
> Rzeszutek Wilk
> <konrad.wilk@xxxxxxxxxx>; Andrew Cooper <Andrew.Cooper3@xxxxxxxxxx>; Tim 
> (Xen.org) <tim@xxxxxxx>;
> George Dunlap <George.Dunlap@xxxxxxxxxx>; Julien Grall 
> <julien.grall@xxxxxxx>; Jan Beulich
> <jbeulich@xxxxxxxx>; Anthony Perard <anthony.perard@xxxxxxxxxx>; Ian Jackson 
> <Ian.Jackson@xxxxxxxxxx>;
> Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>; Roger Pau Monne 
> <roger.pau@xxxxxxxxxx>
> Subject: Re: [Xen-devel] [PATCH v2 2/2] sysctl/libxl: choose a sane default 
> for HAP
> 
> > -----Original Message-----
> > From: Roger Pau Monne <roger.pau@xxxxxxxxxx>
> > Sent: 05 September 2019 14:27
> > To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> > Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Ian Jackson 
> > <Ian.Jackson@xxxxxxxxxx>; Wei Liu
> > <wl@xxxxxxx>; Anthony Perard <anthony.perard@xxxxxxxxxx>; Andrew Cooper 
> > <Andrew.Cooper3@xxxxxxxxxx>;
> > George Dunlap <George.Dunlap@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; 
> > Julien Grall
> > <julien.grall@xxxxxxx>; Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>; 
> > Stefano Stabellini
> > <sstabellini@xxxxxxxxxx>; Tim (Xen.org) <tim@xxxxxxx>; Volodymyr Babchuk
> <Volodymyr_Babchuk@xxxxxxxx>;
> > Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> > Subject: [PATCH v2 2/2] sysctl/libxl: choose a sane default for HAP
> >
> > Current libxl code always enables Hardware Assisted Paging (HAP),
> > expecting that the hypervisor will fall back to shadow paging if HAP
> > is not available. With the changes to the domain builder that is no
> > longer the case: the hypervisor will raise an error if HAP is not
> > available instead of silently falling back to shadow.
> >
> > In order to keep the previous functionality, report whether HAP is
> > available in XEN_SYSCTL_physinfo, so that the toolstack can select a
> > sane default when there's no explicit user selection of whether HAP
> > should be used.
> >
> > Note that on ARM hardware the HAP capability is always reported,
> > since it is a required feature in order to run Xen.
> >
> > Fixes: d0c0ba7d3de ('x86/hvm/domain: remove the 'hap_enabled' flag')
> > Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> 
> LGTM
> 
> Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> 
> > ---
> > Cc: Paul Durrant <Paul.Durrant@xxxxxxxxxx>
> > ---
> > Changes since v1:
> >  - Also report HAP capability for ARM.
> >  - Print hap capability in xl info.
> > ---
> >  tools/libxl/libxl.c         | 1 +
> >  tools/libxl/libxl_create.c  | 8 +++++++-
> >  tools/libxl/libxl_types.idl | 1 +
> >  tools/xl/xl_info.c          | 5 +++--
> >  xen/arch/arm/sysctl.c       | 2 +-
> >  xen/arch/x86/sysctl.c       | 2 ++
> >  xen/include/public/sysctl.h | 4 ++++
> >  7 files changed, 19 insertions(+), 4 deletions(-)
> >
> > diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
> > index ec71574e99..5c0fcf320e 100644
> > --- a/tools/libxl/libxl.c
> > +++ b/tools/libxl/libxl.c
> > @@ -399,6 +399,7 @@ int libxl_get_physinfo(libxl_ctx *ctx, libxl_physinfo *physinfo)
> >      physinfo->cap_pv = !!(xcphysinfo.capabilities & XEN_SYSCTL_PHYSCAP_pv);
> >      physinfo->cap_hvm_directio =
> >          !!(xcphysinfo.capabilities & XEN_SYSCTL_PHYSCAP_directio);
> > +    physinfo->cap_hap = !!(xcphysinfo.capabilities & XEN_SYSCTL_PHYSCAP_hap);
> >
> >      GC_FREE;
> >      return 0;
> > diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
> > index 03ce166f4f..6a556dea8f 100644
> > --- a/tools/libxl/libxl_create.c
> > +++ b/tools/libxl/libxl_create.c
> > @@ -38,7 +38,13 @@ int libxl__domain_create_info_setdefault(libxl__gc *gc,
> >      libxl__arch_domain_create_info_setdefault(gc, c_info);
> >
> >      if (c_info->type != LIBXL_DOMAIN_TYPE_PV) {
> > -        libxl_defbool_setdefault(&c_info->hap, true);
> > +        libxl_physinfo info;
> > +        int rc = libxl_get_physinfo(CTX, &info);
> > +
> > +        if (rc)
> > +            return rc;
> > +
> > +        libxl_defbool_setdefault(&c_info->hap, info.cap_hap);
> >          libxl_defbool_setdefault(&c_info->oos, true);
> >      }
> >
> > diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
> > index b61399ce36..9e1f8515d3 100644
> > --- a/tools/libxl/libxl_types.idl
> > +++ b/tools/libxl/libxl_types.idl
> > @@ -1025,6 +1025,7 @@ libxl_physinfo = Struct("physinfo", [
> >      ("cap_hvm", bool),
> >      ("cap_pv", bool),
> >      ("cap_hvm_directio", bool), # No longer HVM specific
> > +    ("cap_hap", bool),

Actually, Julien's review of one of my patches points out that this IDL change 
should be accompanied by an associated LIBXL_HAVE_CAP_HAP definition, along 
the lines of the sketch below.
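
Something like this should do, following the existing LIBXL_HAVE_*
convention in libxl.h (only a sketch; the guard name and the consumer
snippet are illustrative, not part of the posted patch):

    /* libxl.h: advertise that libxl_physinfo now has the cap_hap field. */
    #define LIBXL_HAVE_CAP_HAP 1

    /* Hypothetical consumer, compiling against both old and new libxl. */
    #ifdef LIBXL_HAVE_CAP_HAP
        use_hap = info.cap_hap;
    #else
        use_hap = true; /* old behaviour: request HAP unconditionally */
    #endif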

  Paul

> >      ], dir=DIR_OUT)
> >
> >  libxl_connectorinfo = Struct("connectorinfo", [
> > diff --git a/tools/xl/xl_info.c b/tools/xl/xl_info.c
> > index 46d9c9f712..aa6724bc7f 100644
> > --- a/tools/xl/xl_info.c
> > +++ b/tools/xl/xl_info.c
> > @@ -210,11 +210,12 @@ static void output_physinfo(void)
> >           info.hw_cap[4], info.hw_cap[5], info.hw_cap[6], info.hw_cap[7]
> >          );
> >
> > -    maybe_printf("virt_caps              :%s%s%s%s\n",
> > +    maybe_printf("virt_caps              :%s%s%s%s%s\n",
> >           info.cap_pv ? " pv" : "",
> >           info.cap_hvm ? " hvm" : "",
> >           info.cap_hvm && info.cap_hvm_directio ? " hvm_directio" : "",
> > -         info.cap_pv && info.cap_hvm_directio ? " pv_directio" : ""
> > +         info.cap_pv && info.cap_hvm_directio ? " pv_directio" : "",
> > +         info.cap_hap ? " hap" : ""
> >          );
> >
> >      vinfo = libxl_get_version_info(ctx);
> > diff --git a/xen/arch/arm/sysctl.c b/xen/arch/arm/sysctl.c
> > index 92ac99c928..f87944e847 100644
> > --- a/xen/arch/arm/sysctl.c
> > +++ b/xen/arch/arm/sysctl.c
> > @@ -14,7 +14,7 @@
> >
> >  void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
> >  {
> > -    pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
> > +    pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm | XEN_SYSCTL_PHYSCAP_hap;
> >  }
> >
> >  long arch_do_sysctl(struct xen_sysctl *sysctl,
> > diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
> > index 7ec6174e6b..5777a05ffc 100644
> > --- a/xen/arch/x86/sysctl.c
> > +++ b/xen/arch/x86/sysctl.c
> > @@ -163,6 +163,8 @@ void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
> >          pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm;
> >      if ( IS_ENABLED(CONFIG_PV) )
> >          pi->capabilities |= XEN_SYSCTL_PHYSCAP_pv;
> > +    if ( hvm_hap_supported() )
> > +        pi->capabilities |= XEN_SYSCTL_PHYSCAP_hap;
> >  }
> >
> >  long arch_do_sysctl(
> > diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> > index 36b3f8c429..e02d7ce4c6 100644
> > --- a/xen/include/public/sysctl.h
> > +++ b/xen/include/public/sysctl.h
> > @@ -90,6 +90,10 @@ struct xen_sysctl_tbuf_op {
> >   /* The platform supports direct access to I/O devices with IOMMU. */
> >  #define _XEN_SYSCTL_PHYSCAP_directio     2
> >  #define XEN_SYSCTL_PHYSCAP_directio  (1u<<_XEN_SYSCTL_PHYSCAP_directio)
> > +/* The platform supports Hardware Assisted Paging. */
> > +#define _XEN_SYSCTL_PHYSCAP_hap          3
> > +#define XEN_SYSCTL_PHYSCAP_hap           (1u<<_XEN_SYSCTL_PHYSCAP_hap)
> > +
> >  struct xen_sysctl_physinfo {
> >      uint32_t threads_per_core;
> >      uint32_t cores_per_socket;
> > --
> > 2.22.0
> 
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

