[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH 6/8] x86: get per domain CQM information
On 20/11/13 03:27, dongxiao.xu@xxxxxxxxx wrote: > From: Dongxiao Xu <dongxiao.xu@xxxxxxxxx> > > Retrive CQM information for certain domain, which reflects the L3 cache > occupancy for a socket. > > Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx> > Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx> > --- > xen/arch/x86/pqos.c | 60 ++++++++++++++++++++++++++++++++++++ > xen/arch/x86/sysctl.c | 64 > +++++++++++++++++++++++++++++++++++++++ > xen/include/asm-x86/msr-index.h | 4 +++ > xen/include/asm-x86/pqos.h | 14 +++++++++ > xen/include/public/domctl.h | 14 +++++++++ > xen/include/public/sysctl.h | 23 ++++++++++++++ > 6 files changed, 179 insertions(+) > > diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c > index 895d892..3699efe 100644 > --- a/xen/arch/x86/pqos.c > +++ b/xen/arch/x86/pqos.c > @@ -19,13 +19,30 @@ > * Place - Suite 330, Boston, MA 02111-1307 USA. > */ > #include <asm/processor.h> > +#include <asm/msr.h> > +#include <xen/cpumask.h> > #include <xen/init.h> > #include <xen/spinlock.h> > +#include <public/domctl.h> > #include <asm/pqos.h> > > static bool_t pqos_enabled = 1; > boolean_param("pqos", pqos_enabled); > > +static void read_qm_data(void *arg) > +{ > + struct qm_element *qm_element = arg; > + > + wrmsr(MSR_IA32_QOSEVTSEL, qm_element->evtid, qm_element->rmid); > + rdmsrl(MSR_IA32_QMC, qm_element->qm_data); > +} > + > +static void get_generic_qm_info(struct qm_element *qm_element) > +{ > + int cpu = qm_element->cpu; > + on_selected_cpus(cpumask_of(cpu), read_qm_data, qm_element, 1); > +} > + > unsigned int cqm_res_count = 0; > unsigned int cqm_upscaling_factor = 0; > bool_t cqm_enabled = 0; > @@ -85,6 +102,25 @@ bool_t system_support_cqm(void) > return cqm_enabled; > } > > +unsigned int get_cqm_count(void) > +{ > + return cqm_res_count; > +} > + > +unsigned int get_cqm_avail(void) > +{ > + unsigned int cqm_avail = 0; > + int i; > + > + for (i = 0; i < cqm_res_count; i++) > + { > + if ( !cqm_res_array[i].inuse ) > + cqm_avail++; > + } Style - 
extra spaces inside brackets for the for loop, and these braces can go. > + > + return cqm_avail; > +} > + > int alloc_cqm_resource(domid_t domain_id) > { > int i, rmid = -1; > @@ -136,6 +172,30 @@ void free_cqm_resource(domid_t domain_id) > spin_unlock_irqrestore(&cqm_lock, flags); > } > > +void get_cqm_info(uint32_t rmid, cpumask_t cpu_cqmdata_map, > + struct xen_domctl_getdomcqminfo *info) > +{ > + struct qm_element element; > + int cpu, i; > + > + for_each_cpu ( cpu, &cpu_cqmdata_map ) > + { > + element.cpu = cpu; > + element.rmid = rmid; > + element.evtid = QOS_MONITOR_EVTID_L3; > + > + get_generic_qm_info(&element); > + > + i = cpu_to_socket(cpu); > + info->socket_cqmdata[i].valid = > + (element.qm_data & IA32_QM_CTR_ERROR_MASK) ? 0 : 1; > + if ( info->socket_cqmdata[i].valid ) > + info->socket_cqmdata[i].l3c_occupancy = element.qm_data * > cqm_upscaling_factor; > + else > + info->socket_cqmdata[i].l3c_occupancy = 0; > + } > +} > + > /* > * Local variables: > * mode: C > diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c > index 15d4b91..a779fdc 100644 > --- a/xen/arch/x86/sysctl.c > +++ b/xen/arch/x86/sysctl.c > @@ -28,6 +28,7 @@ > #include <xen/nodemask.h> > #include <xen/cpu.h> > #include <xsm/xsm.h> > +#include <asm/pqos.h> > > #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0) > > @@ -101,6 +102,69 @@ long arch_do_sysctl( > } > break; > > + case XEN_SYSCTL_getdomcqminfolist: > + { > + struct domain *d; > + struct xen_domctl_getdomcqminfo info; > + uint32_t resource_count; > + uint32_t resource_avail; > + uint32_t num_domains = 0; > + cpumask_t cpu_cqmdata_map; > + DECLARE_BITMAP(sockets, QOS_MAX_SOCKETS); > + int cpu; unsigned int. > + > + if ( !system_support_cqm() ) > + { > + ret = -EFAULT; ENODEV surely ? 
> + break; > + } > + > + resource_count = get_cqm_count(); > + resource_avail = get_cqm_avail(); > + > + cpumask_clear(&cpu_cqmdata_map); > + bitmap_zero(sockets, QOS_MAX_SOCKETS); > + for_each_online_cpu(cpu) > + { > + int i = cpu_to_socket(cpu); > + if ( test_and_set_bit(i, sockets) ) > + continue; > + cpumask_set_cpu(cpu, &cpu_cqmdata_map); > + } > + > + rcu_read_lock(&domlist_read_lock); > + for_each_domain ( d ) > + { > + if ( d->domain_id < sysctl->u.getdomaininfolist.first_domain ) > + continue; > + if ( num_domains == sysctl->u.getdomaininfolist.max_domains ) > + break; > + if ( d->arch.pqos_cqm_rmid <= 0 ) > + continue; > + memset(&info, 0, sizeof(struct xen_domctl_getdomcqminfo)); > + info.domain = d->domain_id; > + get_cqm_info(d->arch.pqos_cqm_rmid, cpu_cqmdata_map, &info); > + > + if ( copy_to_guest_offset(sysctl->u.getdomcqminfolist.buffer, > + num_domains, &info, 1) ) > + { > + ret = -EFAULT; > + break; > + } > + > + num_domains++; > + } > + rcu_read_unlock(&domlist_read_lock); > + > + sysctl->u.getdomcqminfolist.num_domains = num_domains; > + sysctl->u.getdomcqminfolist.resource_count = resource_count; > + sysctl->u.getdomcqminfolist.resource_avail = resource_avail; > + > + if ( copy_to_guest(u_sysctl, sysctl, 1) ) > + ret = -EFAULT; > + } > + break; > + > default: > ret = -ENOSYS; > break; > diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h > index e597a28..46ef165 100644 > --- a/xen/include/asm-x86/msr-index.h > +++ b/xen/include/asm-x86/msr-index.h > @@ -488,4 +488,8 @@ > /* Geode defined MSRs */ > #define MSR_GEODE_BUSCONT_CONF0 0x00001900 > > +/* Platform QoS register */ > +#define MSR_IA32_QOSEVTSEL 0x00000c8d > +#define MSR_IA32_QMC 0x00000c8e > + > #endif /* __ASM_MSR_INDEX_H */ > diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h > index 7e32fa5..6d1b1e8 100644 > --- a/xen/include/asm-x86/pqos.h > +++ b/xen/include/asm-x86/pqos.h > @@ -27,15 +27,29 @@ > /* QoS Monitoring Event ID */ > #define 
QOS_MONITOR_EVTID_L3 0x1 > > +/* IA32_QM_CTR */ > +#define IA32_QM_CTR_ERROR_MASK (0x3ul << 62) > + > struct cqm_res_struct { > bool_t inuse; > uint16_t domain_id; > }; > > +struct qm_element { > + uint16_t cpu; > + uint32_t rmid; > + uint8_t evtid; > + uint64_t qm_data; > +}; > + The packing of this structure is rather poor. Please re-order to reduce some of the holes. Also, cpu parameters are unsigned int in Xen. > void init_platform_qos(void); > > bool_t system_support_cqm(void); > int alloc_cqm_resource(domid_t); > void free_cqm_resource(domid_t); > +unsigned int get_cqm_count(void); > +unsigned int get_cqm_avail(void); > +void get_cqm_info(uint32_t rmid, cpumask_t cpu_cqmdata_map, > + struct xen_domctl_getdomcqminfo *info); > > #endif > diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h > index 4fe21db..bdefc83 100644 > --- a/xen/include/public/domctl.h > +++ b/xen/include/public/domctl.h > @@ -883,6 +883,20 @@ struct xen_domctl_qos_resource { > typedef struct xen_domctl_qos_resource xen_domctl_qos_resource_t; > DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_resource_t); > > +struct xen_socket_cqmdata { > + uint8_t valid; > + uint64_t l3c_occupancy; > +}; > + > +struct xen_domctl_getdomcqminfo { > + /* OUT variables. 
*/ > + domid_t domain; > +#define QOS_MAX_SOCKETS 128 > + struct xen_socket_cqmdata socket_cqmdata[QOS_MAX_SOCKETS]; > +}; > +typedef struct xen_domctl_getdomcqminfo xen_domctl_getdomcqminfo_t; > +DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomcqminfo_t); > + > struct xen_domctl { > uint32_t cmd; > #define XEN_DOMCTL_createdomain 1 > diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h > index 8437d31..91f206e 100644 > --- a/xen/include/public/sysctl.h > +++ b/xen/include/public/sysctl.h > @@ -149,6 +149,14 @@ struct xen_sysctl_perfc_op { > typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t; > DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t); > > +struct xen_sysctl_getcqminfo > +{ > + uint32_t resource_count; > + uint32_t resource_avail; > + struct xen_domctl_getdomcqminfo *dom_cqminfo; XEN_GUEST_HANDLE please. > +}; > +typedef struct xen_sysctl_getcqminfo xen_sysctl_getcqminfo_t; > + > /* XEN_SYSCTL_getdomaininfolist */ > struct xen_sysctl_getdomaininfolist { > /* IN variables. */ > @@ -632,6 +640,19 @@ struct xen_sysctl_coverage_op { > typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t; > DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t); > > +/* XEN_SYSCTL_getdomcqminfolist */ > +struct xen_sysctl_getdomcqminfolist { > + /* IN variables. */ > + domid_t first_domain; > + uint32_t max_domains; > + XEN_GUEST_HANDLE_64(xen_domctl_getdomcqminfo_t) buffer; > + /* OUT variables. 
*/ > + uint32_t num_domains; > + uint32_t resource_count; > + uint32_t resource_avail; > +}; > +typedef struct xen_sysctl_getdomcqminfolist xen_sysctl_getdomcqminfolist_t; > +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomcqminfolist_t); > > struct xen_sysctl { > uint32_t cmd; > @@ -654,6 +675,7 @@ struct xen_sysctl { > #define XEN_SYSCTL_cpupool_op 18 > #define XEN_SYSCTL_scheduler_op 19 > #define XEN_SYSCTL_coverage_op 20 > +#define XEN_SYSCTL_getdomcqminfolist 21 > uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */ > union { > struct xen_sysctl_readconsole readconsole; > @@ -675,6 +697,7 @@ struct xen_sysctl { > struct xen_sysctl_cpupool_op cpupool_op; > struct xen_sysctl_scheduler_op scheduler_op; > struct xen_sysctl_coverage_op coverage_op; > + struct xen_sysctl_getdomcqminfolist getdomcqminfolist; > uint8_t pad[128]; > } u; > }; _______________________________________________ Xen-devel mailing list Xen-devel@xxxxxxxxxxxxx http://lists.xen.org/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.