
[Xen-devel] [PATCH v13 09/11] x86/domctl: Don't pause the whole domain if only getting vcpu state


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Thu, 19 Jul 2018 17:08:07 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Thu, 19 Jul 2018 14:08:44 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This patch moves the for loop over vcpus from the save handlers to their
caller, so that the save_one handlers can be used to save the state of a
single vcpu instance.
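
In outline, hvm_save() now iterates over the vcpus itself and calls the
per-vcpu save_one handler when one is registered, falling back to the
whole-domain save handler otherwise. A simplified sketch of that shape
(error logging trimmed, names as in the hunks below):

    for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
    {
        if ( hvm_sr_handlers[i].save_one != NULL )
        {
            /* Per-vcpu record type: the loop now lives in the caller. */
            for_each_vcpu ( d, v )
                if ( hvm_sr_handlers[i].save_one(v, h) != 0 )
                    return -EFAULT;
        }
        else if ( hvm_sr_handlers[i].save != NULL )
        {
            /* Per-domain record type: still saved with a single call. */
            if ( hvm_sr_handlers[i].save(d, h) != 0 )
                return -EFAULT;
        }
    }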

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>

---
Changes since V11:
        - Changed the CONTINUE return to return 0.
---
 xen/arch/x86/hvm/hvm.c  |  19 ++++---
 xen/arch/x86/hvm/save.c | 137 +++++++++++++++++++++++++++++++++++++-----------
 2 files changed, 116 insertions(+), 40 deletions(-)
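
The hvm_save_one() side is what avoids pausing the whole domain: when the
requested record is per-vcpu and the instance is in range, only that vcpu is
paused around the save. A simplified sketch of that path (details and error
handling as in the save.c hunk below):

    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
         instance < d->max_vcpus )
    {
        vcpu_pause(d->vcpu[instance]);      /* pause only the target vcpu */

        if ( hvm_sr_handlers[typecode].save_one != NULL )
            rv = hvm_sr_handlers[typecode].save_one(d->vcpu[instance], &ctxt);
        else
            rv = hvm_sr_handlers[typecode].save(d, &ctxt);

        /* ... copy the single record out to the caller's buffer ... */

        vcpu_unpause(d->vcpu[instance]);
    }
    else
    {
        domain_pause(d);                    /* everything else: whole domain */
        /* ... save and search for the matching instance as before ... */
        domain_unpause(d);
    }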

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 9ee1129..09c088f 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -793,6 +793,14 @@ static int hvm_save_cpu_ctxt_one(struct vcpu *v, hvm_domain_context_t *h)
     struct segment_register seg;
     struct hvm_hw_cpu ctxt = {};
 
+    /*
+     * We don't need to save state for a vcpu that is down; the restore
+     * code will leave it down if there is nothing saved.
+     */
+    if ( v->pause_flags & VPF_down )
+        return 0;
+
     /* Architecture-specific vmcs/vmcb bits */
     hvm_funcs.save_cpu_ctxt(v, &ctxt);
 
@@ -897,13 +905,6 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        /*
-         * We don't need to save state for a vcpu that is down; the restore
-         * code will leave it down if there is nothing saved.
-         */
-        if ( v->pause_flags & VPF_down )
-            continue;
-
         err = hvm_save_cpu_ctxt_one(v, h);
         if ( err )
             break;
@@ -1196,7 +1197,7 @@ static int hvm_save_cpu_xsave_states_one(struct vcpu *v, hvm_domain_context_t *h
     unsigned int size = HVM_CPU_XSAVE_SIZE(v->arch.xcr0_accum);
     int err = 0;
 
-    if ( !cpu_has_xsave )
+    if ( !cpu_has_xsave || !xsave_enabled(v) )
         return 0;   /* do nothing */
 
     err = _hvm_init_entry(h, CPU_XSAVE_CODE, v->vcpu_id, size);
@@ -1221,8 +1222,6 @@ static int hvm_save_cpu_xsave_states(struct domain *d, hvm_domain_context_t *h)
 
     for_each_vcpu ( d, v )
     {
-        if ( !xsave_enabled(v) )
-            continue;
         err = hvm_save_cpu_xsave_states_one(v, h);
         if ( err )
             break;
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index b674937..d57648d 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -138,9 +138,12 @@ size_t hvm_save_size(struct domain *d)
 int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
                  XEN_GUEST_HANDLE_64(uint8) handle, uint64_t *bufsz)
 {
-    int rv;
+    int rv = 0;
     hvm_domain_context_t ctxt = { };
     const struct hvm_save_descriptor *desc;
+    bool is_single_instance = false;
+    uint32_t off = 0;
 
     if ( d->is_dying ||
          typecode > HVM_SAVE_CODE_MAX ||
@@ -148,43 +151,94 @@ int hvm_save_one(struct domain *d, unsigned int typecode, unsigned int instance,
          !hvm_sr_handlers[typecode].save )
         return -EINVAL;
 
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance < d->max_vcpus )
+        is_single_instance = true;
+
     ctxt.size = hvm_sr_handlers[typecode].size;
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU &&
+         instance == d->max_vcpus )
         ctxt.size *= d->max_vcpus;
     ctxt.data = xmalloc_bytes(ctxt.size);
     if ( !ctxt.data )
         return -ENOMEM;
 
-    if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
-        printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
-               d->domain_id, typecode, rv);
-    else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+    if ( is_single_instance )
+        vcpu_pause(d->vcpu[instance]);
+    else
+        domain_pause(d);
+
+    if ( is_single_instance )
     {
-        uint32_t off;
+        if ( hvm_sr_handlers[typecode].save_one != NULL )
+            rv = hvm_sr_handlers[typecode].save_one(d->vcpu[instance],
+                                                    &ctxt);
+        else
+            rv = hvm_sr_handlers[typecode].save(d, &ctxt);
 
-        for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+        if ( rv != 0 )
         {
-            desc = (void *)(ctxt.data + off);
-            /* Move past header */
-            off += sizeof(*desc);
-            if ( ctxt.cur < desc->length ||
-                 off > ctxt.cur - desc->length )
-                break;
-            if ( instance == desc->instance )
-            {
-                rv = 0;
-                if ( guest_handle_is_null(handle) )
-                    *bufsz = desc->length;
-                else if ( *bufsz < desc->length )
-                    rv = -ENOBUFS;
-                else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
-                    rv = -EFAULT;
-                else
-                    *bufsz = desc->length;
-                break;
-            }
+            printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                   d->domain_id, typecode, rv);
+            vcpu_unpause(d->vcpu[instance]);
+        }
+        else if ( ctxt.cur >= sizeof(*desc) )
+        {
+            desc = (void *)ctxt.data;
+            /* Move past header */
+            off = sizeof(*desc);
+            if ( ctxt.cur < desc->length ||
+                 off > ctxt.cur - desc->length )
+                rv = -EFAULT;
+            else if ( guest_handle_is_null(handle) )
+                *bufsz = desc->length;
+            else if ( *bufsz < desc->length )
+                rv = -ENOBUFS;
+            else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                rv = -EFAULT;
+            else
+                *bufsz = desc->length;
+            vcpu_unpause(d->vcpu[instance]);
+        }
+        else
+        {
+            /* The handler saved nothing for this vcpu (e.g. it is down). */
+            rv = -ENOENT;
+            vcpu_unpause(d->vcpu[instance]);
         }
     }
+    else
+    {
+        /* Save all instances of this type, then locate the requested one. */
+        if ( (rv = hvm_sr_handlers[typecode].save(d, &ctxt)) != 0 )
+            printk(XENLOG_G_ERR "HVM%d save: failed to save type %"PRIu16" (%d)\n",
+                   d->domain_id, typecode, rv);
+        else if ( rv = -ENOENT, ctxt.cur >= sizeof(*desc) )
+        {
+            for ( off = 0; off <= (ctxt.cur - sizeof(*desc)); off += desc->length )
+            {
+                desc = (void *)(ctxt.data + off);
+                /* Move past header */
+                off += sizeof(*desc);
+                if ( ctxt.cur < desc->length ||
+                     off > ctxt.cur - desc->length )
+                    break;
+                if ( instance == desc->instance )
+                {
+                    rv = 0;
+                    if ( guest_handle_is_null(handle) )
+                        *bufsz = desc->length;
+                    else if ( *bufsz < desc->length )
+                        rv = -ENOBUFS;
+                    else if ( copy_to_guest(handle, ctxt.data + off, desc->length) )
+                        rv = -EFAULT;
+                    else
+                        *bufsz = desc->length;
+                    break;
+                }
+            }
+        }
+        domain_unpause(d);
+    }
 
     xfree(ctxt.data);
     return rv;
@@ -196,7 +250,9 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_header hdr;
     struct hvm_save_end end;
     hvm_save_handler handler;
-    unsigned int i;
+    hvm_save_one_handler save_one_handler;
+    unsigned int i, rc;
+    struct vcpu *v = NULL;
 
     if ( d->is_dying )
         return -EINVAL;
@@ -224,11 +280,32 @@ int hvm_save(struct domain *d, hvm_domain_context_t *h)
     for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ )
     {
         handler = hvm_sr_handlers[i].save;
-        if ( handler != NULL )
+        save_one_handler = hvm_sr_handlers[i].save_one;
+        if ( save_one_handler != NULL )
         {
             printk(XENLOG_G_INFO "HVM%d save: %s\n",
                    d->domain_id, hvm_sr_handlers[i].name);
-            if ( handler(d, h) != 0 )
+            for_each_vcpu ( d, v )
+            {
+                rc = save_one_handler(v, h);
+
+                if ( rc != 0 )
+                {
+                    printk(XENLOG_G_ERR
+                           "HVM%d save: failed to save type %"PRIu16"\n",
+                           d->domain_id, i);
+                    return -EFAULT;
+                }
+            }
+        }
+        else if ( handler != NULL )
+        {
+            printk(XENLOG_G_INFO "HVM%d save: %s\n",
+                   d->domain_id, hvm_sr_handlers[i].name);
+
+            rc = handler(d, h);
+
+            if ( rc != 0 )
             {
                 printk(XENLOG_G_ERR
                        "HVM%d save: failed to save type %"PRIu16"\n",
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

