Re: [Xen-devel] [PATCH v7 3/6] x86: collect CQM information from all sockets
On 03/02/14 11:36, Dongxiao Xu wrote:
> Collect CQM information (L3 cache occupancy) from all sockets.
> Upper layer applications can parse the data structure to get
> information about a guest's L3 cache occupancy on particular sockets.
>
> Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
> Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
> ---
> xen/arch/x86/pqos.c | 43 ++++++++++++++++++++++++++++
> xen/arch/x86/sysctl.c | 59 +++++++++++++++++++++++++++++++++++++++
> xen/include/asm-x86/msr-index.h | 4 +++
> xen/include/asm-x86/pqos.h | 4 +++
> xen/include/public/sysctl.h | 11 ++++++++
> 5 files changed, 121 insertions(+)
>
> diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
> index eb469ac..2cde56e 100644
> --- a/xen/arch/x86/pqos.c
> +++ b/xen/arch/x86/pqos.c
> @@ -15,6 +15,7 @@
> * more details.
> */
> #include <asm/processor.h>
> +#include <asm/msr.h>
> #include <xen/init.h>
> #include <xen/mm.h>
> #include <xen/spinlock.h>
> @@ -205,6 +206,48 @@ out:
> spin_unlock(&cqm->cqm_lock);
> }
>
> +static void read_cqm_data(void *arg)
> +{
> + uint64_t cqm_data;
> + unsigned int rmid;
> + int socket = cpu_to_socket(smp_processor_id());
> + unsigned long i;
> +
> + ASSERT(system_supports_cqm());
> +
> + if ( socket < 0 )
> + return;
> +
> + for ( rmid = cqm->min_rmid; rmid <= cqm->max_rmid; rmid++ )
> + {
> + if ( cqm->rmid_to_dom[rmid] == DOMID_INVALID )
> + continue;
> +
> + wrmsr(MSR_IA32_QOSEVTSEL, QOS_MONITOR_EVTID_L3, rmid);
> + rdmsrl(MSR_IA32_QMC, cqm_data);
> +
> + i = (unsigned long)(cqm->max_rmid + 1) * socket + rmid;
> + if ( !(cqm_data & IA32_QM_CTR_ERROR_MASK) )
> + cqm->buffer[i] = cqm_data * cqm->upscaling_factor;
> + }
> +}
> +
> +void get_cqm_info(const cpumask_t *cpu_cqmdata_map)
> +{
> + unsigned int nr_sockets = cpumask_weight(cpu_cqmdata_map) + 1;
> + unsigned int nr_rmids = cqm->max_rmid + 1;
> +
> + /* Read CQM data in current CPU */
> + read_cqm_data(NULL);
> + /* Issue IPI to other CPUs to read CQM data */
> + on_selected_cpus(cpu_cqmdata_map, read_cqm_data, NULL, 1);
> +
> + /* Copy the rmid_to_dom info to the buffer */
> + memcpy(cqm->buffer + nr_sockets * nr_rmids, cqm->rmid_to_dom,
> + sizeof(domid_t) * (cqm->max_rmid + 1));
> +
> +}
> +
> /*
> * Local variables:
> * mode: C
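
For reference, a consumer-side sketch of the buffer layout implied by
read_cqm_data()/get_cqm_info(): nr_sockets * nr_rmids 64-bit occupancy
values, followed by the nr_rmids domid_t entries copied from
rmid_to_dom[].  The helper names and the domid_t/DOMID_INVALID
definitions below are illustrative only, not part of the patch:

    #include <stdint.h>

    typedef uint16_t domid_t;        /* as in Xen's public headers */
    #define DOMID_INVALID 0x7FF4     /* as in public/xen.h */

    /* L3 occupancy (already upscaled) for (socket, rmid). */
    static uint64_t l3_occupancy(const void *buf, unsigned int nr_rmids,
                                 unsigned int socket, unsigned int rmid)
    {
        const uint64_t *data = buf;

        /* Same indexing as "i = (max_rmid + 1) * socket + rmid" above. */
        return data[(unsigned long)nr_rmids * socket + rmid];
    }

    /* Owner of an RMID, or DOMID_INVALID if it is unused. */
    static domid_t rmid_owner(const void *buf, unsigned int nr_sockets,
                              unsigned int nr_rmids, unsigned int rmid)
    {
        /* rmid_to_dom[] is appended after the per-socket data. */
        const domid_t *map =
            (const domid_t *)((const uint64_t *)buf + nr_sockets * nr_rmids);

        return map[rmid];
    }
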
> diff --git a/xen/arch/x86/sysctl.c b/xen/arch/x86/sysctl.c
> index 15d4b91..5391800 100644
> --- a/xen/arch/x86/sysctl.c
> +++ b/xen/arch/x86/sysctl.c
> @@ -28,6 +28,7 @@
> #include <xen/nodemask.h>
> #include <xen/cpu.h>
> #include <xsm/xsm.h>
> +#include <asm/pqos.h>
>
> #define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
>
> @@ -66,6 +67,30 @@ void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
> pi->capabilities |= XEN_SYSCTL_PHYSCAP_hvm_directio;
> }
>
> +/* Select one random CPU for each socket. Current CPU's socket is excluded */
> +static void select_socket_cpu(cpumask_t *cpu_bitmap)
> +{
> + int i;
> + unsigned int cpu;
> + int socket, socket_curr = cpu_to_socket(smp_processor_id());
> + DECLARE_BITMAP(sockets, NR_CPUS);
> +
> + bitmap_zero(sockets, NR_CPUS);
> + if (socket_curr >= 0)
> + set_bit(socket_curr, sockets);
> +
> + cpumask_clear(cpu_bitmap);
> + for ( i = 0; i < NR_CPUS; i++ )
> + {
> + socket = cpu_to_socket(i);
> + if ( socket < 0 || test_and_set_bit(socket, sockets) )
> + continue;
> + cpu = cpumask_any(per_cpu(cpu_core_mask, i));
> + if ( cpu < nr_cpu_ids )
> + cpumask_set_cpu(cpu, cpu_bitmap);
> + }
> +}
> +
> long arch_do_sysctl(
> struct xen_sysctl *sysctl, XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
> {
> @@ -101,6 +126,40 @@ long arch_do_sysctl(
> }
> break;
>
> + case XEN_SYSCTL_getcqminfo:
> + {
> + cpumask_var_t cpu_cqmdata_map;
> +
> + if ( !zalloc_cpumask_var(&cpu_cqmdata_map) )
> + {
> + ret = -ENOMEM;
> + break;
> + }
> +
> + if ( !system_supports_cqm() )
> + {
> + ret = -ENODEV;
> + free_cpumask_var(cpu_cqmdata_map);
> + break;
> + }
Check for CQM support (the -ENODEV case) first, to avoid a pointless
memory allocation.
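
i.e. a minimal sketch of the suggested reordering (remainder of the
handler unchanged):

    case XEN_SYSCTL_getcqminfo:
    {
        cpumask_var_t cpu_cqmdata_map;

        /* Do the cheap capability check before allocating anything. */
        if ( !system_supports_cqm() )
        {
            ret = -ENODEV;
            break;
        }

        if ( !zalloc_cpumask_var(&cpu_cqmdata_map) )
        {
            ret = -ENOMEM;
            break;
        }

        /* ... rest of the handler as posted ... */
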
~Andrew
> +
> + memset(cqm->buffer, 0, cqm->buffer_size);
> +
> + select_socket_cpu(cpu_cqmdata_map);
> + get_cqm_info(cpu_cqmdata_map);
> +
> + sysctl->u.getcqminfo.buffer_mfn = virt_to_mfn(cqm->buffer);
> + sysctl->u.getcqminfo.size = cqm->buffer_size;
> + sysctl->u.getcqminfo.nr_rmids = cqm->max_rmid + 1;
> + sysctl->u.getcqminfo.nr_sockets = cpumask_weight(cpu_cqmdata_map) + 1;
> +
> + if ( __copy_to_guest(u_sysctl, sysctl, 1) )
> + ret = -EFAULT;
> +
> + free_cpumask_var(cpu_cqmdata_map);
> + }
> + break;
> +
> default:
> ret = -ENOSYS;
> break;
> diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
> index fc9fbc6..e3ff10c 100644
> --- a/xen/include/asm-x86/msr-index.h
> +++ b/xen/include/asm-x86/msr-index.h
> @@ -489,4 +489,8 @@
> /* Geode defined MSRs */
> #define MSR_GEODE_BUSCONT_CONF0 0x00001900
>
> +/* Platform QoS register */
> +#define MSR_IA32_QOSEVTSEL 0x00000c8d
> +#define MSR_IA32_QMC 0x00000c8e
> +
> #endif /* __ASM_MSR_INDEX_H */
> diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
> index f25037d..87820d5 100644
> --- a/xen/include/asm-x86/pqos.h
> +++ b/xen/include/asm-x86/pqos.h
> @@ -17,6 +17,8 @@
> #ifndef ASM_PQOS_H
> #define ASM_PQOS_H
> #include <xen/sched.h>
> +#include <xen/cpumask.h>
> +#include <public/domctl.h>
>
> #include <public/xen.h>
> #include <xen/spinlock.h>
> @@ -51,5 +53,7 @@ void init_platform_qos(void);
>
> int alloc_cqm_rmid(struct domain *d);
> void free_cqm_rmid(struct domain *d);
> +void get_cqm_info(const cpumask_t *cpu_cqmdata_map);
> +void cqm_assoc_rmid(unsigned int rmid);
This function prototype should live in the next patch, alongside its
implementation.
~Andrew
>
> #endif
> diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
> index 8437d31..335b1d9 100644
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -632,6 +632,15 @@ struct xen_sysctl_coverage_op {
> typedef struct xen_sysctl_coverage_op xen_sysctl_coverage_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_sysctl_coverage_op_t);
>
> +struct xen_sysctl_getcqminfo {
> + uint64_aligned_t buffer_mfn;
> + uint32_t size;
> + uint32_t nr_rmids;
> + uint32_t nr_sockets;
> +};
> +typedef struct xen_sysctl_getcqminfo xen_sysctl_getcqminfo_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcqminfo_t);
> +
>
> struct xen_sysctl {
> uint32_t cmd;
> @@ -654,6 +663,7 @@ struct xen_sysctl {
> #define XEN_SYSCTL_cpupool_op 18
> #define XEN_SYSCTL_scheduler_op 19
> #define XEN_SYSCTL_coverage_op 20
> +#define XEN_SYSCTL_getcqminfo 21
> uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
> union {
> struct xen_sysctl_readconsole readconsole;
> @@ -675,6 +685,7 @@ struct xen_sysctl {
> struct xen_sysctl_cpupool_op cpupool_op;
> struct xen_sysctl_scheduler_op scheduler_op;
> struct xen_sysctl_coverage_op coverage_op;
> + struct xen_sysctl_getcqminfo getcqminfo;
> uint8_t pad[128];
> } u;
> };
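
As a hypothetical example of how a consumer of XEN_SYSCTL_getcqminfo
might tie these fields together (not part of the patch): the returned
buffer is expected to hold nr_sockets * nr_rmids 64-bit occupancy
values plus nr_rmids 16-bit domid_t entries, so a simple consistency
check against size could look like:

    #include <stdbool.h>
    #include <stdint.h>

    /* Does the advertised buffer size cover the layout described above? */
    static bool getcqminfo_layout_fits(uint32_t size, uint32_t nr_rmids,
                                       uint32_t nr_sockets)
    {
        uint64_t needed = (uint64_t)nr_sockets * nr_rmids * sizeof(uint64_t) +
                          (uint64_t)nr_rmids * sizeof(uint16_t);

        return needed <= size;
    }
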
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel