[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v6 2/7] x86: dynamically attach/detach CQM service for a guest



Add hypervisor-side support for dynamically attaching and detaching the
CQM service for a particular guest.

When the CQM service is attached to a guest, the system allocates an
RMID for it. When the service is detached, or the guest is shut down,
the RMID is reclaimed for future use.

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Signed-off-by: Jiongxi Li <jiongxi.li@xxxxxxxxx>
Signed-off-by: Dongxiao Xu <dongxiao.xu@xxxxxxxxx>
---
 xen/arch/x86/domain.c        |    3 +++
 xen/arch/x86/domctl.c        |   40 +++++++++++++++++++++++++++++++++
 xen/arch/x86/pqos.c          |   51 ++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |    2 ++
 xen/include/asm-x86/pqos.h   |    5 +++++
 xen/include/public/domctl.h  |   11 +++++++++
 6 files changed, 112 insertions(+)

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index a3868f9..90e52a2 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -60,6 +60,7 @@
 #include <xen/numa.h>
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
+#include <asm/pqos.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 DEFINE_PER_CPU(unsigned long, cr4);
@@ -612,6 +613,8 @@ void arch_domain_destroy(struct domain *d)
 
     free_xenheap_page(d->shared_info);
     cleanup_domain_irq_mapping(d);
+
+    free_cqm_rmid(d);
 }
 
 unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index f7e4586..7007990 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -35,6 +35,7 @@
 #include <asm/mem_sharing.h>
 #include <asm/xstate.h>
 #include <asm/debugger.h>
+#include <asm/pqos.h>
 
 static int gdbsx_guest_mem_io(
     domid_t domid, struct xen_domctl_gdbsx_memio *iop)
@@ -1223,6 +1224,45 @@ long arch_do_domctl(
     }
     break;
 
+    case XEN_DOMCTL_attach_pqos:
+    {
+        if ( domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm )
+        {
+            if ( !system_supports_cqm() )
+                ret = -ENODEV;
+            else if ( d->arch.pqos_cqm_rmid > 0 )
+                ret = -EEXIST;
+            else
+            {
+                ret = alloc_cqm_rmid(d);
+                if ( ret < 0 )
+                    ret = -EUSERS;
+            }
+        }
+        else
+            ret = -EINVAL;
+    }
+    break;
+
+    case XEN_DOMCTL_detach_pqos:
+    {
+        if ( domctl->u.qos_type.flags & XEN_DOMCTL_pqos_cqm )
+        {
+            if ( !system_supports_cqm() )
+                ret = -ENODEV;
+            else if ( d->arch.pqos_cqm_rmid > 0 )
+            {
+                free_cqm_rmid(d);
+                ret = 0;
+            }
+            else
+                ret = -ENOENT;
+        }
+        else
+            ret = -EINVAL;
+    }
+    break;
+
     default:
         ret = iommu_do_domctl(domctl, d, u_domctl);
         break;
diff --git a/xen/arch/x86/pqos.c b/xen/arch/x86/pqos.c
index d78048a..67d733e 100644
--- a/xen/arch/x86/pqos.c
+++ b/xen/arch/x86/pqos.c
@@ -16,6 +16,7 @@
  */
 #include <asm/processor.h>
 #include <xen/init.h>
+#include <xen/spinlock.h>
 #include <asm/pqos.h>
 
 static bool_t __initdata opt_pqos = 1;
@@ -59,6 +60,7 @@ static void __init parse_pqos_param(char *s)
 custom_param("pqos", parse_pqos_param);
 
 struct pqos_cqm *cqm;
+static DEFINE_SPINLOCK(cqm_lock);
 
 static void __init init_cqm(void)
 {
@@ -119,6 +121,55 @@ void __init init_platform_qos(void)
     init_qos_monitor();
 }
 
+bool_t system_supports_cqm(void)
+{
+    return !!cqm;
+}
+
+int alloc_cqm_rmid(struct domain *d)
+{
+    int rc = 0;
+    unsigned int rmid;
+    unsigned long flags;
+
+    ASSERT(system_supports_cqm());
+
+    spin_lock_irqsave(&cqm_lock, flags);
+    for ( rmid = cqm->min_rmid; rmid <= cqm->max_rmid; rmid++ )
+    {
+        if ( cqm->rmid_to_dom[rmid] != DOMID_INVALID )
+            continue;
+
+        cqm->rmid_to_dom[rmid] = d->domain_id;
+        break;
+    }
+    spin_unlock_irqrestore(&cqm_lock, flags);
+
+    /* No CQM RMID available, assign RMID=0 by default */
+    if ( rmid > cqm->max_rmid )
+    {
+        rmid = 0;
+        rc = -1;
+    }
+
+    d->arch.pqos_cqm_rmid = rmid;
+
+    return rc;
+}
+
+void free_cqm_rmid(struct domain *d)
+{
+    unsigned int rmid = d->arch.pqos_cqm_rmid;
+
+    /* We do not free system reserved "RMID=0" */
+    if ( rmid == 0 )
+        return;
+
+    cqm->rmid_to_dom[rmid] = DOMID_INVALID;
+
+    d->arch.pqos_cqm_rmid = 0;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9d39061..9487251 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -313,6 +313,8 @@ struct arch_domain
     spinlock_t e820_lock;
     struct e820entry *e820;
     unsigned int nr_e820;
+
+    unsigned int pqos_cqm_rmid;       /* CQM RMID assigned to the domain */
 } __cacheline_aligned;
 
 #define has_arch_pdevs(d)    (!list_empty(&(d)->arch.pdev_list))
diff --git a/xen/include/asm-x86/pqos.h b/xen/include/asm-x86/pqos.h
index da925db..16c4882 100644
--- a/xen/include/asm-x86/pqos.h
+++ b/xen/include/asm-x86/pqos.h
@@ -16,6 +16,7 @@
  */
 #ifndef ASM_PQOS_H
 #define ASM_PQOS_H
+#include <xen/sched.h>
 
 #include <public/xen.h>
 
@@ -35,4 +36,8 @@ extern struct pqos_cqm *cqm;
 
 void init_platform_qos(void);
 
+bool_t system_supports_cqm(void);
+int alloc_cqm_rmid(struct domain *d);
+void free_cqm_rmid(struct domain *d);
+
 #endif
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 01a3652..d53e216 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -869,6 +869,14 @@ struct xen_domctl_set_max_evtchn {
 typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
 
+struct xen_domctl_qos_type {
+#define _XEN_DOMCTL_pqos_cqm      0
+#define XEN_DOMCTL_pqos_cqm       (1U<<_XEN_DOMCTL_pqos_cqm)
+    uint64_t flags;
+};
+typedef struct xen_domctl_qos_type xen_domctl_qos_type_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_qos_type_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -938,6 +946,8 @@ struct xen_domctl {
 #define XEN_DOMCTL_setnodeaffinity               68
 #define XEN_DOMCTL_getnodeaffinity               69
 #define XEN_DOMCTL_set_max_evtchn                70
+#define XEN_DOMCTL_attach_pqos                   71
+#define XEN_DOMCTL_detach_pqos                   72
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -998,6 +1008,7 @@ struct xen_domctl {
         struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
+        struct xen_domctl_qos_type          qos_type;
         uint8_t                             pad[128];
     } u;
 };
-- 
1.7.9.5


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.