[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 2/8] x86: handle CQM resource when creating/destroying guests
> -----Original Message----- > From: Andrew Cooper [mailto:andrew.cooper3@xxxxxxxxxx] > Sent: Wednesday, November 20, 2013 7:33 PM > To: Xu, Dongxiao > Cc: xen-devel@xxxxxxxxxxxxx > Subject: Re: [Xen-devel] [PATCH 2/8] x86: handle CQM resource when > creating/destroying guests > > On 20/11/13 03:27, dongxiao.xu@xxxxxxxxx wrote: > > From: Dongxiao Xu <dongxiao.xu@xxxxxxxxx> > > > > Allocate an RMID for a guest when it is created. This per-guest > > RMID will be used to monitor Cache QoS related data. The RMID will > > be relinquished when guest is destroyed. > > > > Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx> > > Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx> > > --- > > xen/arch/x86/domain.c | 12 +++++++++ > > xen/arch/x86/pqos.c | 58 > ++++++++++++++++++++++++++++++++++++++++++ > > xen/common/domctl.c | 5 +++- > > xen/include/asm-x86/domain.h | 2 ++ > > xen/include/asm-x86/pqos.h | 4 +++ > > xen/include/public/domctl.h | 3 +++ > > xen/include/xen/sched.h | 3 +++ > > 7 files changed, 86 insertions(+), 1 deletion(-) > > > > diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c > > index a3868f9..5f920cd 100644 > > --- a/xen/arch/x86/domain.c > > +++ b/xen/arch/x86/domain.c > > @@ -60,6 +60,7 @@ > > #include <xen/numa.h> > > #include <xen/iommu.h> > > #include <compat/vcpu.h> > > +#include <asm/pqos.h> > > > > DEFINE_PER_CPU(struct vcpu *, curr_vcpu); > > DEFINE_PER_CPU(unsigned long, cr4); > > @@ -579,6 +580,14 @@ int arch_domain_create(struct domain *d, unsigned int > domcr_flags) > > tsc_set_info(d, TSC_MODE_DEFAULT, 0UL, 0, 0); > > spin_lock_init(&d->arch.vtsc_lock); > > > > + /* Allocate CQM RMID for guest */ > > + if ( !!(domcr_flags & DOMCRF_pqos_cqm) ) > > + d->arch.pqos_cqm_rmid = alloc_cqm_resource(d->domain_id); > > + else if ( system_support_cqm() ) > > + d->arch.pqos_cqm_rmid = 0; > > + else > > + d->arch.pqos_cqm_rmid = -1; > > + > > This logic looks overly-complicated, and calling alloc_cqm_resource() > before checking 
system_support_cqm() seems the wrong way around. OK. > > > return 0; > > > > fail: > > @@ -612,6 +621,9 @@ void arch_domain_destroy(struct domain *d) > > > > free_xenheap_page(d->shared_info); > > cleanup_domain_irq_mapping(d); > > + > > + free_cqm_resource(d->domain_id); > > + d->arch.pqos_cqm_rmid = 0; > > } > > > > unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long > guest_cr4) > > diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c > > index 617cce1..895d892 100644 > > --- a/xen/arch/x86/pqos.c > > +++ b/xen/arch/x86/pqos.c > > @@ -20,6 +20,7 @@ > > */ > > #include <asm/processor.h> > > #include <xen/init.h> > > +#include <xen/spinlock.h> > > #include <asm/pqos.h> > > > > static bool_t pqos_enabled = 1; > > @@ -29,6 +30,7 @@ unsigned int cqm_res_count = 0; > > unsigned int cqm_upscaling_factor = 0; > > bool_t cqm_enabled = 0; > > struct cqm_res_struct *cqm_res_array = NULL; > > +static DEFINE_SPINLOCK(cqm_lock); > > > > static void init_cqm(void) > > { > > @@ -78,6 +80,62 @@ void init_platform_qos(void) > > init_qos_monitor(); > > } > > > > +bool_t system_support_cqm(void) > > "system_supports_cqm" OK > > > +{ > > + return cqm_enabled; > > +} > > + > > +int alloc_cqm_resource(domid_t domain_id) > > Perhaps alloc_rmid() given its current semantics? > > > +{ > > + int i, rmid = -1; > > + unsigned long flags; > > + > > + if ( !system_support_cqm() ) > > + goto out; > > Probably better as an ASSERT(). The caller should really not try to > allocate cqm resources if the system doesn't have cqm enabled. OK. 
> > > + > > + spin_lock_irqsave(&cqm_lock, flags); > > + for ( i = 0; i < cqm_res_count; i++ ) > > + { > > + if ( cqm_res_array[i].inuse) > > + continue; > > + > > + rmid = i; > > + cqm_res_array[rmid].inuse = 1; > > + cqm_res_array[rmid].domain_id = domain_id; > > + break; > > + } > > + spin_unlock_irqrestore(&cqm_lock, flags); > > + > > + /* No CQM RMID available, assign RMID=0 by default */ > > + if ( rmid == -1 ) > > + rmid = 0; > > + > > +out: > > + return rmid; > > +} > > + > > +void free_cqm_resource(domid_t domain_id) > > +{ > > + int rmid; > > + unsigned long flags; > > + > > + if ( !system_support_cqm() ) > > + return; > > + > > + spin_lock_irqsave(&cqm_lock, flags); > > + /* CQM RMID 0 is reserved, enumerate from 1 */ > > + for ( rmid = 1; rmid < cqm_res_count; rmid++ ) > > + { > > + if ( cqm_res_array[rmid].domain_id != domain_id ) > > + continue; > > + > > + cqm_res_array[rmid].inuse = 0; > > + cqm_res_array[rmid].domain_id = 0; > > + break; > > + } > > + spin_unlock_irqrestore(&cqm_lock, flags); > > Can you not use d->arch.pqos_cqm_rmid here to save the search ? This needs to pass the "d" as a parameter, but seems a good idea. Will reflect above suggestions in next version patch. 
Thanks, Dongxiao > > > +} > > + > > /* > > * Local variables: > > * mode: C > > diff --git a/xen/common/domctl.c b/xen/common/domctl.c > > index 904d27b..1c2e320 100644 > > --- a/xen/common/domctl.c > > +++ b/xen/common/domctl.c > > @@ -425,7 +425,8 @@ long > do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) > > | XEN_DOMCTL_CDF_pvh_guest > > | XEN_DOMCTL_CDF_hap > > | XEN_DOMCTL_CDF_s3_integrity > > - | XEN_DOMCTL_CDF_oos_off)) ) > > + | XEN_DOMCTL_CDF_oos_off > > + | XEN_DOMCTL_CDF_pqos_cqm)) ) > > break; > > > > dom = op->domain; > > @@ -467,6 +468,8 @@ long > do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl) > > domcr_flags |= DOMCRF_s3_integrity; > > if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_oos_off ) > > domcr_flags |= DOMCRF_oos_off; > > + if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_pqos_cqm ) > > + domcr_flags |= DOMCRF_pqos_cqm; > > > > d = domain_create(dom, domcr_flags, op->u.createdomain.ssidref); > > if ( IS_ERR(d) ) > > diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h > > index 9d39061..b0479aa 100644 > > --- a/xen/include/asm-x86/domain.h > > +++ b/xen/include/asm-x86/domain.h > > @@ -313,6 +313,8 @@ struct arch_domain > > spinlock_t e820_lock; > > struct e820entry *e820; > > unsigned int nr_e820; > > + > > + int pqos_cqm_rmid; /* CQM RMID assigned to the domain */ > > } __cacheline_aligned; > > > > #define has_arch_pdevs(d) (!list_empty(&(d)->arch.pdev_list)) > > diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h > > index 2184ea9..7e32fa5 100644 > > --- a/xen/include/asm-x86/pqos.h > > +++ b/xen/include/asm-x86/pqos.h > > @@ -34,4 +34,8 @@ struct cqm_res_struct { > > > > void init_platform_qos(void); > > > > +bool_t system_support_cqm(void); > > +int alloc_cqm_resource(domid_t); > > +void free_cqm_resource(domid_t); > > + > > #endif > > diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h > > index 01a3652..47a850a 100644 > > --- a/xen/include/public/domctl.h > > +++ 
b/xen/include/public/domctl.h > > @@ -62,6 +62,9 @@ struct xen_domctl_createdomain { > > /* Is this a PVH guest (as opposed to an HVM or PV guest)? */ > > #define _XEN_DOMCTL_CDF_pvh_guest 4 > > #define XEN_DOMCTL_CDF_pvh_guest > (1U<<_XEN_DOMCTL_CDF_pvh_guest) > > + /* Enable pqos-cqm? */ > > +#define _XEN_DOMCTL_CDF_pqos_cqm 5 > > +#define XEN_DOMCTL_CDF_pqos_cqm > (1U<<_XEN_DOMCTL_CDF_pqos_cqm) > > uint32_t flags; > > }; > > typedef struct xen_domctl_createdomain xen_domctl_createdomain_t; > > diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h > > index cbdf377..3a42656 100644 > > --- a/xen/include/xen/sched.h > > +++ b/xen/include/xen/sched.h > > @@ -507,6 +507,9 @@ struct domain *domain_create( > > /* DOMCRF_pvh: Create PV domain in HVM container. */ > > #define _DOMCRF_pvh 5 > > #define DOMCRF_pvh (1U<<_DOMCRF_pvh) > > + /* DOMCRF_pqos_cqm: Create a domain with CQM support */ > > +#define _DOMCRF_pqos_cqm 6 > > +#define DOMCRF_pqos_cqm (1U<<_DOMCRF_pqos_cqm) > > > > /* > > * rcu_lock_domain_by_id() is more efficient than get_domain_by_id(). _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®. |