[Xen-devel] [PATCH v14 05/10] x86: dynamically attach/detach QoS monitoring service for a guest
Add hypervisor-side support for dynamically attaching and detaching the
QoS monitoring service for a guest. When the service is attached to a
guest, the system allocates an RMID for it. When the service is detached
or the guest is shut down, the RMID is recycled for future use.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
Signed-off-by: Chao Peng <chao.p.peng@xxxxxxxxxxxxxxx>
---
 xen/arch/x86/domain.c        |    3 +++
 xen/arch/x86/domctl.c        |   29 ++++++++++++++++++++++++++
 xen/arch/x86/pqos.c          |   46 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |    2 ++
 xen/include/asm-x86/pqos.h   |    9 +++++++++
 xen/include/public/domctl.h  |   12 +++++++++++
 6 files changed, 101 insertions(+)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f7e0e78..99fce11 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -60,6 +60,7 @@
 #include <xen/numa.h>
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
+#include <asm/pqos.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
@@ -644,6 +645,8 @@ void arch_domain_destroy(struct domain *d)
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
+
+    pqos_monitor_free_rmid(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index d1517c4..cc719b1 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
 #include <asm/debugger.h>
+#include <asm/pqos.h>
 
 static int gdbsx_guest_mem_io(
     domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1395,6 +1396,34 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_pqos_monitor_op:
+        if ( !pqos_monitor_enabled() )
+        {
+            ret = -ENODEV;
+            break;
+        }
+
+        switch ( domctl->u.pqos_monitor_op.cmd )
+        {
+        case XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH:
+            ret = pqos_monitor_alloc_rmid(d);
+            break;
+        case XEN_DOMCTL_PQOS_MONITOR_OP_DETACH:
+            if ( d->arch.pqos_rmid > 0 )
+                pqos_monitor_free_rmid(d);
+            else
+                ret = -ENOENT;
+            break;
+        case XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID:
+            domctl->u.pqos_monitor_op.data = d->arch.pqos_rmid;
+            copyback = 1;
+            break;
+        default:
+            ret = -ENOSYS;
+            break;
+        }
+        break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
index 44eee69..6f7e0ee 100644
--- a/xen/arch/x86/pqos.c
+++ b/xen/arch/x86/pqos.c
@@ -15,6 +15,7 @@
  */
 #include <xen/init.h>
 #include <xen/cpu.h>
+#include <xen/sched.h>
 #include <asm/pqos.h>
 
 struct pqos_monitor *__read_mostly pqosm = NULL;
@@ -102,6 +103,51 @@ void __init init_platform_qos(void)
     init_pqos_monitor(opt_rmid_max);
 }
 
+/* Called with domain lock held, no pqos specific lock needed */
+int pqos_monitor_alloc_rmid(struct domain *d)
+{
+    unsigned int rmid;
+
+    ASSERT(pqos_monitor_enabled());
+
+    if ( d->arch.pqos_rmid > 0 )
+        return -EEXIST;
+
+    for ( rmid = 1; rmid <= pqosm->rmid_max; rmid++ )
+    {
+        if ( pqosm->rmid_to_dom[rmid] != DOMID_INVALID )
+            continue;
+
+        pqosm->rmid_to_dom[rmid] = d->domain_id;
+        break;
+    }
+
+    /* No RMID available, assign RMID=0 by default */
+    if ( rmid > pqosm->rmid_max )
+    {
+        d->arch.pqos_rmid = 0;
+        return -EUSERS;
+    }
+
+    d->arch.pqos_rmid = rmid;
+
+    return 0;
+}
+
+/* Called with domain lock held, no pqos specific lock needed */
+void pqos_monitor_free_rmid(struct domain *d)
+{
+    unsigned int rmid;
+
+    rmid = d->arch.pqos_rmid;
+    /* We do not free system reserved "RMID=0" */
+    if ( rmid == 0 )
+        return;
+
+    pqosm->rmid_to_dom[rmid] = DOMID_INVALID;
+    d->arch.pqos_rmid = 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 83329ed..ca295fa 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,8 @@ struct arch_domain
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
     unsigned long pirq_eoi_map_mfn;
+
+    unsigned int pqos_rmid; /* QoS monitoring RMID assigned to the domain */
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index 96bfdc6..5737b51 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -16,6 +16,8 @@
 #ifndef __ASM_PQOS_H__
 #define __ASM_PQOS_H__
 
+#include <xen/types.h>
+
 /* QoS Resource Type Enumeration */
 #define QOS_MONITOR_TYPE_L3            0x2
 
@@ -38,7 +40,14 @@ struct pqos_monitor {
 
 extern struct pqos_monitor *pqosm;
 
+static inline bool_t pqos_monitor_enabled(void)
+{
+    return !!pqosm;
+}
+
 void init_platform_qos(void);
+int pqos_monitor_alloc_rmid(struct domain *d);
+void pqos_monitor_free_rmid(struct domain *d);
 
 #endif /* __ASM_PQOS_H__ */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 5b11bbf..af9e775 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -936,6 +936,16 @@ typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
 #endif
 
+struct xen_domctl_pqos_monitor_op {
+#define XEN_DOMCTL_PQOS_MONITOR_OP_DETACH         0
+#define XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH         1
+#define XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID     2
+    uint32_t cmd;
+    uint32_t data;
+};
+typedef struct xen_domctl_pqos_monitor_op xen_domctl_pqos_monitor_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_pqos_monitor_op_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -1008,6 +1018,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_cacheflush                    71
 #define XEN_DOMCTL_get_vcpu_msrs                 72
 #define XEN_DOMCTL_set_vcpu_msrs                 73
+#define XEN_DOMCTL_pqos_monitor_op               74
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1068,6 +1079,7 @@ struct xen_domctl {
         struct xen_domctl_cacheflush        cacheflush;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
+        struct xen_domctl_pqos_monitor_op   pqos_monitor_op;
         uint8_t                             pad[128];
     } u;
 };
-- 
1.7.9.5
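
For context, here is a minimal, hypothetical sketch of how a toolstack could
exercise the new XEN_DOMCTL_pqos_monitor_op interface from libxc (the real
tools-side wrappers are added later in this series, not by this patch). It
assumes libxc's internal DECLARE_DOMCTL and do_domctl() helpers from
xc_private.h; the function names xc_pqos_monitor_op() and
attach_and_query_rmid() are illustrative only.

    #include <xenctrl.h>
    #include "xc_private.h"  /* libxc-internal: DECLARE_DOMCTL, do_domctl() */

    /* Issue one XEN_DOMCTL_pqos_monitor_op sub-command for a domain. */
    static int xc_pqos_monitor_op(xc_interface *xch, uint32_t domid,
                                  uint32_t cmd, uint32_t *data)
    {
        int rc;
        DECLARE_DOMCTL;

        domctl.cmd = XEN_DOMCTL_pqos_monitor_op;
        domctl.domain = (domid_t)domid;
        domctl.u.pqos_monitor_op.cmd = cmd;

        rc = do_domctl(xch, &domctl);

        /* QUERY_RMID copies the domain's current RMID back in .data. */
        if ( rc == 0 && data != NULL )
            *data = domctl.u.pqos_monitor_op.data;

        return rc;
    }

    /* Attach monitoring, then read back the RMID the hypervisor assigned. */
    static int attach_and_query_rmid(xc_interface *xch, uint32_t domid,
                                     uint32_t *rmid)
    {
        int rc = xc_pqos_monitor_op(xch, domid,
                                    XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH, NULL);

        if ( rc != 0 )
            return rc;  /* e.g. no free RMID left, or already attached */

        return xc_pqos_monitor_op(xch, domid,
                                  XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID, rmid);
    }

Detach is symmetrical: passing XEN_DOMCTL_PQOS_MONITOR_OP_DETACH releases the
domain's RMID, after which QUERY_RMID reports 0, the reserved "unmonitored"
RMID.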