[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 2/2 for-4.12] xen: implement VCPUOP_register_runstate_phys_memory_area



From: Andrii Anisov <andrii_anisov@xxxxxxxx>

VCPUOP_register_runstate_phys_memory_area is implemented via runstate
area mapping.

Signed-off-by: Andrii Anisov <andrii_anisov@xxxxxxxx>
---
 xen/arch/arm/domain.c        | 22 ++++++++++-
 xen/arch/x86/domain.c        | 34 ++++++++++++++--
 xen/common/domain.c          | 92 ++++++++++++++++++++++++++++++++++++++++++--
 xen/include/asm-arm/domain.h |  2 +
 xen/include/xen/domain.h     |  5 +++
 5 files changed, 148 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
index ec9bdbd..afc2d48 100644
--- a/xen/arch/arm/domain.c
+++ b/xen/arch/arm/domain.c
@@ -275,7 +275,7 @@ static void ctxt_switch_to(struct vcpu *n)
 }
 
 /* Update per-VCPU guest runstate shared memory area (if registered). */
-static void update_runstate_area(struct vcpu *v)
+void update_runstate_area(struct vcpu *v)
 {
     if ( guest_handle_is_null(runstate_guest(v)) )
         return;
@@ -305,6 +305,26 @@ static void update_runstate_area(struct vcpu *v)
                                 1);
         }
     }
+    else if ( v->runstate_guest_type == RUNSTATE_PADDR )
+    {
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            runstate_guest(v).p->state_entry_time |= XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+            v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+        }
+
+        memcpy(runstate_guest(v).p, &v->runstate, sizeof(v->runstate));
+
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            runstate_guest(v).p->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+            v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+        }
+    }
+    else
+    { /* No actions required */ }
 }
 
 static void schedule_tail(struct vcpu *prev)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 2acffba..6598bbb 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1605,7 +1605,7 @@ void paravirt_ctxt_switch_to(struct vcpu *v)
 /* Update per-VCPU guest runstate shared memory area (if registered). */
 bool update_runstate_area(struct vcpu *v)
 {
-    bool rc;
+    bool rc = true;
     struct guest_memory_policy policy = { .nested_guest_mode = false };
     void __user *guest_handle = NULL;
 
@@ -1648,9 +1648,37 @@ bool update_runstate_area(struct vcpu *v)
                                (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
         }
     }
-    else
+    else if ( v->runstate_guest_type == RUNSTATE_PADDR )
     {
-        rc = true;
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
+            if ( has_32bit_shinfo((v)->domain) )
+            v->runstate_guest.compat.p->state_entry_time |= XEN_RUNSTATE_UPDATE;
+            else
+                runstate_guest(v).p->state_entry_time |= XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+        }
+
+        if ( has_32bit_shinfo(v->domain) )
+        {
+            struct compat_vcpu_runstate_info info;
+
+            XLAT_vcpu_runstate_info(&info, &v->runstate);
+            memcpy(v->runstate_guest.compat.p, &info, sizeof(info));
+        }
+        else
+            memcpy(runstate_guest(v).p, &v->runstate, sizeof(v->runstate));
+
+        if ( VM_ASSIST(v->domain, runstate_update_flag) )
+        {
+            v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            if ( has_32bit_shinfo((v)->domain) )
+            v->runstate_guest.compat.p->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            else
+                runstate_guest(v).p->state_entry_time &= ~XEN_RUNSTATE_UPDATE;
+            smp_wmb();
+        }
     }
 
     update_guest_memory_policy(v, &policy);
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 2c83ede..cb9c788 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -738,7 +738,14 @@ int domain_kill(struct domain *d)
         if ( cpupool_move_domain(d, cpupool0) )
             return -ERESTART;
         for_each_vcpu ( d, v )
+        {
+            if ( v->runstate_guest_type == RUNSTATE_VADDR )
+                set_xen_guest_handle(runstate_guest(v), NULL);
+            else
+                unmap_runstate_area(v);
+
             unmap_vcpu_info(v);
+        }
         d->is_dying = DOMDYING_dead;
         /* Mem event cleanup has to go here because the rings 
          * have to be put before we call put_domain. */
@@ -1192,7 +1199,11 @@ int domain_soft_reset(struct domain *d)
 
     for_each_vcpu ( d, v )
     {
-        set_xen_guest_handle(runstate_guest(v), NULL);
+        if ( v->runstate_guest_type == RUNSTATE_VADDR )
+            set_xen_guest_handle(runstate_guest(v), NULL);
+        else
+            unmap_runstate_area(v);
+
         unmap_vcpu_info(v);
     }
 
@@ -1333,6 +1344,65 @@ void unmap_vcpu_info(struct vcpu *v)
     put_page_and_type(mfn_to_page(mfn));
 }
 
+int map_runstate_area(struct vcpu *v,
+                      struct vcpu_register_runstate_memory_area *area)
+{
+    unsigned long offset = area->addr.p & ~PAGE_MASK;
+    gfn_t gfn = gaddr_to_gfn(area->addr.p);
+    struct domain *d = v->domain;
+    void *mapping;
+    struct page_info *page;
+    size_t size = sizeof (struct vcpu_runstate_info );
+
+    ASSERT(v->runstate_guest_type == RUNSTATE_PADDR );
+
+    if ( offset > (PAGE_SIZE - size) )
+        return -EINVAL;
+
+    page = get_page_from_gfn(d, gfn_x(gfn), NULL, P2M_ALLOC);
+    if ( !page )
+        return -EINVAL;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        return -EINVAL;
+    }
+
+    mapping = __map_domain_page_global(page);
+
+    if ( mapping == NULL )
+    {
+        put_page_and_type(page);
+        return -ENOMEM;
+    }
+
+    runstate_guest(v).p = mapping + offset;
+
+    return 0;
+}
+
+void unmap_runstate_area(struct vcpu *v)
+{
+    mfn_t mfn;
+
+    if ( v->runstate_guest_type != RUNSTATE_PADDR )
+        return;
+
+    if ( guest_handle_is_null(runstate_guest(v)) )
+        return;
+
+    mfn = _mfn(virt_to_mfn(runstate_guest(v).p));
+
+    unmap_domain_page_global((void *)
+                             ((unsigned long)runstate_guest(v).p &
+                              PAGE_MASK));
+
+    v->runstate_guest_type = RUNSTATE_NONE;
+    runstate_guest(v).p = NULL;
+    put_page_and_type(mfn_to_page(mfn));
+}
+
 int default_initialise_vcpu(struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct vcpu_guest_context *ctxt;
@@ -1532,13 +1602,29 @@ long do_vcpu_op(int cmd, unsigned int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
             vcpu_runstate_get(v, &runstate);
             __copy_to_guest(runstate_guest(v), &runstate, 1);
         }
-
         break;
     }
 
     case VCPUOP_register_runstate_phys_memory_area:
     {
-        rc = -ENOSYS;
+        struct vcpu_register_runstate_memory_area area;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(&area, arg, 1) )
+            break;
+
+        unmap_runstate_area(v);
+        v->runstate_guest_type = RUNSTATE_PADDR;
+        rc = map_runstate_area(v, &area);
+
+        if ( rc )
+        {
+            v->runstate_guest_type = RUNSTATE_NONE;
+            break;
+        }
+
+        update_runstate_area(v);
+
         break;
     }
 
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 312fec8..3fb6ea2 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -217,6 +217,8 @@ void vcpu_show_execution_state(struct vcpu *);
 void vcpu_show_registers(const struct vcpu *);
 void vcpu_switch_to_aarch64_mode(struct vcpu *);
 
+void update_runstate_area(struct vcpu *);
+
 /*
  * Due to the restriction of GICv3, the number of vCPUs in AFF0 is
  * limited to 16, thus only the first 4 bits of AFF0 are legal. We will
diff --git a/xen/include/xen/domain.h b/xen/include/xen/domain.h
index d1bfc82..090a54d 100644
--- a/xen/include/xen/domain.h
+++ b/xen/include/xen/domain.h
@@ -118,4 +118,9 @@ struct vnuma_info {
 
 void vnuma_destroy(struct vnuma_info *vnuma);
 
+struct vcpu_register_runstate_memory_area;
+int map_runstate_area(struct vcpu *v,
+                      struct vcpu_register_runstate_memory_area *area);
+void unmap_runstate_area(struct vcpu *v);
+
 #endif /* __XEN_DOMAIN_H__ */
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.