[Xen-devel] [PATCH v12 5/9] x86: dynamically attach/detach QoS monitoring service for a guest
Add hypervisor-side support for dynamically attaching and detaching the QoS
monitoring service for a guest. When the service is attached to a guest, the
system allocates an RMID for it. When the service is detached or the guest is
shut down, the RMID is recycled for future use.

Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/domain.c        |    3 +++
 xen/arch/x86/domctl.c        |   29 ++++++++++++++++++++++++
 xen/arch/x86/pqos.c          |   50 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |    2 ++
 xen/include/asm-x86/pqos.h   |    8 +++++++
 xen/include/public/domctl.h  |   12 ++++++++++
 6 files changed, 104 insertions(+)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index e896210..f8e0e33 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -60,6 +60,7 @@
 #include <xen/numa.h>
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
+#include <asm/pqos.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
@@ -636,6 +637,8 @@ void arch_domain_destroy(struct domain *d)
     free_xenheap_page(d->shared_info);
 
     cleanup_domain_irq_mapping(d);
+
+    pqos_monitor_free_rmid(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index a4effc3..3958830 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
 #include <asm/debugger.h>
+#include <asm/pqos.h>
 
 static int gdbsx_guest_mem_io(
     domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1395,6 +1396,34 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_pqos_monitor_op:
+        if ( !pqos_monitor_enabled() )
+        {
+            ret = -ENODEV;
+            break;
+        }
+
+        switch ( domctl->u.pqos_monitor_op.cmd )
+        {
+        case XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH:
+            ret = pqos_monitor_alloc_rmid(d);
+            break;
+        case XEN_DOMCTL_PQOS_MONITOR_OP_DETACH:
+            if ( d->arch.pqos_rmid > 0 )
+                pqos_monitor_free_rmid(d);
+            else
+                ret = -ENOENT;
+            break;
+        case XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID:
+            domctl->u.pqos_monitor_op.data = d->arch.pqos_rmid;
+            copyback = 1;
+            break;
+        default:
+            ret = -ENOSYS;
+            break;
+        }
+        break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
index 513dd0f..18fd1f9 100644
--- a/xen/arch/x86/pqos.c
+++ b/xen/arch/x86/pqos.c
@@ -106,6 +106,56 @@ void __init init_platform_qos(void)
     init_pqos_monitor(opt_rmid_max);
 }
 
+int pqos_monitor_alloc_rmid(struct domain *d)
+{
+    int rc = 0;
+    unsigned int rmid;
+
+    ASSERT(pqos_monitor_enabled());
+
+    if ( d->arch.pqos_rmid > 0 )
+    {
+        rc = -EEXIST;
+        return rc;
+    }
+
+    for ( rmid = pqosm->rmid_min; rmid <= pqosm->rmid_max; rmid++ )
+    {
+        if ( pqosm->rmid_to_dom[rmid] != DOMID_INVALID )
+            continue;
+
+        pqosm->rmid_to_dom[rmid] = d->domain_id;
+        break;
+    }
+
+    /* No RMID available, assign RMID=0 by default */
+    if ( rmid > pqosm->rmid_max )
+    {
+        rmid = 0;
+        rc = -EUSERS;
+    }
+    else
+        pqosm->rmid_inuse++;
+
+    d->arch.pqos_rmid = rmid;
+
+    return rc;
+}
+
+void pqos_monitor_free_rmid(struct domain *d)
+{
+    unsigned int rmid;
+
+    rmid = d->arch.pqos_rmid;
+    /* We do not free system reserved "RMID=0" */
+    if ( rmid == 0 )
+        return;
+
+    pqosm->rmid_to_dom[rmid] = DOMID_INVALID;
+    d->arch.pqos_rmid = 0;
+    pqosm->rmid_inuse--;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index abf55fb..cd53108 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,8 @@ struct arch_domain
     /* Shared page for notifying that explicit PIRQ EOI is required. */
     unsigned long *pirq_eoi_map;
     unsigned long pirq_eoi_map_mfn;
+
+    unsigned int pqos_rmid;   /* QoS monitoring RMID assigned to the domain */
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index 1ec7970..16aeb24 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -16,6 +16,7 @@
 #ifndef __ASM_PQOS_H__
 #define __ASM_PQOS_H__
 
+#include <xen/sched.h>
 #include <public/xen.h>
 
 /* QoS Resource Type Enumeration */
@@ -41,7 +42,14 @@ struct pqos_monitor {
 };
 extern struct pqos_monitor *pqosm;
 
+static inline bool_t pqos_monitor_enabled(void)
+{
+    return !!pqosm;
+}
+
 void __init init_platform_qos(void);
+int pqos_monitor_alloc_rmid(struct domain *d);
+void pqos_monitor_free_rmid(struct domain *d);
 
 #endif /* __ASM_PQOS_H__ */
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 5b11bbf..af9e775 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -936,6 +936,16 @@ typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
 #endif
 
+struct xen_domctl_pqos_monitor_op {
+#define XEN_DOMCTL_PQOS_MONITOR_OP_DETACH         0
+#define XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH         1
+#define XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID     2
+    uint32_t cmd;
+    uint32_t data;
+};
+typedef struct xen_domctl_pqos_monitor_op xen_domctl_pqos_monitor_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_pqos_monitor_op_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -1008,6 +1018,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_cacheflush                    71
 #define XEN_DOMCTL_get_vcpu_msrs                 72
 #define XEN_DOMCTL_set_vcpu_msrs                 73
+#define XEN_DOMCTL_pqos_monitor_op               74
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -1068,6 +1079,7 @@ struct xen_domctl {
         struct xen_domctl_cacheflush        cacheflush;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
+        struct xen_domctl_pqos_monitor_op   pqos_monitor_op;
         uint8_t                             pad[128];
     } u;
 };
-- 
1.7.9.5
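
For reviewers' context, the snippet below is a minimal toolstack-side sketch of
how the new sub-ops could be driven; it is not part of the patch. The
issue_domctl() helper and the include path are hypothetical placeholders for
whatever mechanism the caller uses to submit a struct xen_domctl to the
hypervisor; only the struct fields and XEN_DOMCTL_* constants come from the
patch itself.

#include <stdint.h>
#include <string.h>
#include <xen/domctl.h>   /* exact include path depends on the build environment */

/* Hypothetical transport: returns 0 on success, negative errno otherwise. */
int issue_domctl(struct xen_domctl *domctl);

/* Attach the QoS monitoring service to a domain (hypervisor allocates an RMID). */
static int pqos_monitor_attach(domid_t domid)
{
    struct xen_domctl domctl;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_pqos_monitor_op;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = domid;
    domctl.u.pqos_monitor_op.cmd = XEN_DOMCTL_PQOS_MONITOR_OP_ATTACH;

    return issue_domctl(&domctl);
}

/* Query the RMID currently assigned to a domain (0 means "none assigned"). */
static int pqos_monitor_query_rmid(domid_t domid, uint32_t *rmid)
{
    struct xen_domctl domctl;
    int rc;

    memset(&domctl, 0, sizeof(domctl));
    domctl.cmd = XEN_DOMCTL_pqos_monitor_op;
    domctl.interface_version = XEN_DOMCTL_INTERFACE_VERSION;
    domctl.domain = domid;
    domctl.u.pqos_monitor_op.cmd = XEN_DOMCTL_PQOS_MONITOR_OP_QUERY_RMID;

    rc = issue_domctl(&domctl);
    if ( rc == 0 )
        *rmid = domctl.u.pqos_monitor_op.data;

    return rc;
}

A detach call is identical to attach except for using
XEN_DOMCTL_PQOS_MONITOR_OP_DETACH; per the hypervisor code above it fails with
-ENOENT if the domain holds no RMID, and attach fails with -EEXIST if one is
already assigned.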