[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v2 2/8] x86: handle CQM resource when creating/destroying guests


  • To: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • From: "Xu, Dongxiao" <dongxiao.xu@xxxxxxxxx>
  • Date: Mon, 25 Nov 2013 03:21:29 +0000
  • Accept-language: en-US
  • Cc: "xen-devel@xxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxx>
  • Delivery-date: Mon, 25 Nov 2013 03:21:57 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xen.org>
  • Thread-index: AQHO5rYBqthOze5YnUGhrTsWVsjVDpo1SVmw
  • Thread-topic: [Xen-devel] [PATCH v2 2/8] x86: handle CQM resource when creating/destroying guests

> -----Original Message-----
> From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx]
> Sent: Thursday, November 21, 2013 8:34 PM
> To: Xu, Dongxiao
> Cc: xen-devel@xxxxxxxxxxxxx
> Subject: Re: [Xen-devel] [PATCH v2 2/8] x86: handle CQM resource when
> creating/destroying guests
> 
> On 21/11/13 07:20, dongxiao.xu@xxxxxxxxx wrote:
> > From: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> >
> > Allocate an RMID for a guest when it is created. This per-guest
> > RMID will be used to monitor Cache QoS related data. The RMID will
> > be relinquished when guest is destroyed.
> >
> > Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
> > Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> > ---
> >  xen/arch/x86/domain.c        |    9 +++++++
> >  xen/arch/x86/pqos.c          |   59
> ++++++++++++++++++++++++++++++++++++++++++
> >  xen/common/domctl.c          |    5 +++-
> >  xen/include/asm-x86/domain.h |    2 ++
> >  xen/include/asm-x86/pqos.h   |    4 +++
> >  xen/include/public/domctl.h  |    3 +++
> >  xen/include/xen/sched.h      |    3 +++
> >  7 files changed, 84 insertions(+), 1 deletion(-)
> >
> > diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> > index a3868f9..9725649 100644
> > --- a/xen/arch/x86/domain.c
> > +++ b/xen/arch/x86/domain.c
> > @@ -60,6 +60,7 @@
> >  #include <xen/numa.h>
> >  #include <xen/iommu.h>
> >  #include <compat/vcpu.h>
> > +#include <asm/pqos.h>
> >
> >  DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
> >  DEFINE_PER_CPU(unsigned long, cr4);
> > @@ -579,6 +580,11 @@ int arch_domain_create(struct domain *d, unsigned int
> domcr_flags)
> >      tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0);
> >      spin_lock_init(&d->arch.vtsc_lock);
> >
> > +    /* Allocate CQM RMID for guest */
> > +    d->arch.pqos_cqm_rmid = 0;
> > +    if ( system_supports_cqm() && !!(domcr_flags & DOMCRF_pqos_cqm) )
> 
> The !! is redundant here as far as the logical test goes.

OK, I will remove it.

> 
> > +        alloc_cqm_rmid(d);
> > +
> >      return 0;
> >
> >   fail:
> > @@ -612,6 +618,9 @@ void arch_domain_destroy(struct domain *d)
> >
> >      free_xenheap_page(d->shared_info);
> >      cleanup_domain_irq_mapping(d);
> > +
> > +    if ( system_supports_cqm() )
> 
> You can remove this conditional if ... (See free_cqm_rmid())

OK.

> 
> > +        free_cqm_rmid(d);
> >  }
> >
> >  unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long
> guest_cr4)
> > diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
> > index e6ab416..e294799 100644
> > --- a/xen/arch/x86/pqos.c
> > +++ b/xen/arch/x86/pqos.c
> > @@ -20,6 +20,8 @@
> >   */
> >  #include <asm/processor.h>
> >  #include <xen/init.h>
> > +#include <xen/spinlock.h>
> > +#include <xen/sched.h>
> >  #include <asm/pqos.h>
> >
> >  static bool_t pqos_enabled = 1;
> > @@ -29,6 +31,7 @@ unsigned int cqm_res_count = 0;
> >  unsigned int cqm_upscaling_factor = 0;
> >  bool_t cqm_enabled = 0;
> >  struct cqm_res_struct *cqm_res_array = NULL;
> > +static DEFINE_SPINLOCK(cqm_lock);
> >
> >  static void __init init_cqm(void)
> >  {
> > @@ -78,6 +81,62 @@ void __init init_platform_qos(void)
> >      init_qos_monitor();
> >  }
> >
> > +bool_t system_supports_cqm(void)
> > +{
> > +    return cqm_enabled;
> > +}
> > +
> > +int alloc_cqm_rmid(struct domain *d)
> > +{
> > +    int rmid, rc = 0;
> > +    unsigned long flags;
> > +
> > +    ASSERT(system_supports_cqm());
> > +
> > +    spin_lock_irqsave(&cqm_lock, flags);
> > +    /* RMID=0 is reserved, enumerate from 1 */
> > +    for ( rmid = 1; rmid < cqm_res_count; rmid++ )
> > +    {
> > +        if ( cqm_res_array[rmid].inuse)
> > +            continue;
> > +
> > +        cqm_res_array[rmid].inuse = 1;
> > +        cqm_res_array[rmid].domain_id = d->domain_id;
> > +        break;
> > +    }
> > +    spin_unlock_irqrestore(&cqm_lock, flags);
> > +
> > +    /* No CQM RMID available, assign RMID=0 by default */
> > +    if ( rmid == cqm_res_count )
> > +    {
> > +        rmid = 0;
> > +        rc = -1;
> > +    }
> > +
> > +    d->arch.pqos_cqm_rmid = rmid;
> > +
> > +    return rc;
> > +}
> > +
> > +void free_cqm_rmid(struct domain *d)
> > +{
> > +    int rmid = d->arch.pqos_cqm_rmid;
> > +    unsigned long flags;
> > +
> > +    ASSERT(system_supports_cqm());
> 
> ... you remove this assertion and have a return early if rmid is 0.

OK.

> 
> > +
> > +    spin_lock_irqsave(&cqm_lock, flags);
> > +    /* We do not free system reserved "RMID=0" */
> > +    if ( rmid > 0 )
> > +    {
> > +        cqm_res_array[rmid].inuse = 0;
> > +        cqm_res_array[rmid].domain_id = 0;
> 
> Would DOMID_INVALID be more appropriate here? 0 is valid domain
> identifier.  It would also mean that you could remove the inuse flag
> from the structure, and the structure itself degrades to an array of
> domid_t's
> 
> You can then further use cmpxchg() and avoid the spinlock.
> 
> I guess this all depends on whether you are expecting to add new
> information into the structure or not.

Per my understanding, the DOMID_xxx macros are somewhat related to memory management; 
e.g., DOMID_INVALID is used to identify pages with an unknown owner. Is it 
appropriate to use it in the CQM feature?

According to your proposal:
 - DOMID_INVALID is for RMIDs that are not allocated yet;
 - A valid domain number indicates that the RMID is in use by a certain domain;
 - Maybe DOMID_SELF or DOMID_XEN for the system reserved RMID=0?

Do you think it is OK if we introduce extra meanings (CQM specific) for those 
macros?

> 
> > +    }
> > +    spin_unlock_irqrestore(&cqm_lock, flags);
> > +
> > +    d->arch.pqos_cqm_rmid = 0;
> > +}
> > +
> >  /*
> >   * Local variables:
> >   * mode: C
> > diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> > index 904d27b..1c2e320 100644
> > --- a/xen/common/domctl.c
> > +++ b/xen/common/domctl.c
> > @@ -425,7 +425,8 @@ long
> do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> >                 | XEN_DOMCTL_CDF_pvh_guest
> >                 | XEN_DOMCTL_CDF_hap
> >                 | XEN_DOMCTL_CDF_s3_integrity
> > -               | XEN_DOMCTL_CDF_oos_off)) )
> > +               | XEN_DOMCTL_CDF_oos_off
> > +               | XEN_DOMCTL_CDF_pqos_cqm)) )
> >              break;
> >
> >          dom = op->domain;
> > @@ -467,6 +468,8 @@ long
> do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> >              domcr_flags |= DOMCRF_s3_integrity;
> >          if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off )
> >              domcr_flags |= DOMCRF_oos_off;
> > +        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_pqos_cqm )
> > +            domcr_flags |= DOMCRF_pqos_cqm;
> >
> >          d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref);
> >          if ( IS_ERR(d) )
> > diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> > index 9d39061..b0479aa 100644
> > --- a/xen/include/asm-x86/domain.h
> > +++ b/xen/include/asm-x86/domain.h
> > @@ -313,6 +313,8 @@ struct arch_domain
> >      spinlock_t e820_lock;
> >      struct e820entry *e820;
> >      unsigned int nr_e820;
> > +
> > +    int pqos_cqm_rmid;       /* CQM RMID assigned to the domain */
> >  } __cacheline_aligned;
> >
> >  #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
> > diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
> > index 934d68a..88de139 100644
> > --- a/xen/include/asm-x86/pqos.h
> > +++ b/xen/include/asm-x86/pqos.h
> > @@ -34,4 +34,8 @@ struct cqm_res_struct {
> >
> >  void init_platform_qos(void);
> >
> > +bool_t system_supports_cqm(void);
> > +int alloc_cqm_rmid(struct domain *);
> > +void free_cqm_rmid(struct domain *);
> 
> Please use a variable in the function declaration.  "d" would suffice.

OK.

> 
> ~Andrew
> 
> > +
> >  #endif
> > diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> > index 01a3652..47a850a 100644
> > --- a/xen/include/public/domctl.h
> > +++ b/xen/include/public/domctl.h
> > @@ -62,6 +62,9 @@ struct xen_domctl_createdomain {
> >   /* Is this a PVH guest (as opposed to an HVM or PV guest)? */
> >  #define _XEN_DOMCTL_CDF_pvh_guest     4
> >  #define XEN_DOMCTL_CDF_pvh_guest
> (1U<<_XEN_DOMCTL_CDF_pvh_guest)
> > + /* Enable pqos-cqm? */
> > +#define _XEN_DOMCTL_CDF_pqos_cqm      5
> > +#define XEN_DOMCTL_CDF_pqos_cqm
> (1U<<_XEN_DOMCTL_CDF_pqos_cqm)
> >      uint32_t flags;
> >  };
> >  typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
> > diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> > index cbdf377..3a42656 100644
> > --- a/xen/include/xen/sched.h
> > +++ b/xen/include/xen/sched.h
> > @@ -507,6 +507,9 @@ struct domain *domain_create(
> >   /* DOMCRF_pvh: Create PV domain in HVM container. */
> >  #define _DOMCRF_pvh             5
> >  #define DOMCRF_pvh              (1U<<_DOMCRF_pvh)
> > + /* DOMCRF_pqos_cqm: Create a domain with CQM support */
> > +#define _DOMCRF_pqos_cqm        6
> > +#define DOMCRF_pqos_cqm         (1U<<_DOMCRF_pqos_cqm)
> >
> >  /*
> >   * rcu_lock_domain_by_id() is more efficient than get_domain_by_id().


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.