
[Xen-devel] [PATCH v16 05/13] x86/hvm: Introduce hvm_save_cpu_msrs_one func


  • To: xen-devel@xxxxxxxxxxxxx
  • From: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Date: Thu, 9 Aug 2018 12:20:53 +0300
  • Cc: wei.liu2@xxxxxxxxxx, andrew.cooper3@xxxxxxxxxx, ian.jackson@xxxxxxxxxxxxx, paul.durrant@xxxxxxxxxx, jbeulich@xxxxxxxx, Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
  • Delivery-date: Thu, 09 Aug 2018 09:21:21 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

This is used to save MSR data for a single vCPU instance.

Signed-off-by: Alexandru Isaila <aisaila@xxxxxxxxxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>

---
Changes since V14:
        - Remove err init
        - Add blank line ahead of return.
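
Note for reviewers unfamiliar with the save-stream handling: the
"reserve the worst case, then shrink or rewind" pattern at the tail of
hvm_save_cpu_msrs_one() can be illustrated with the standalone sketch
below. It uses hypothetical demo_desc/demo_msr types and a local byte
buffer purely for illustration; these are not the Xen structures or
save API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for the real save-stream descriptor/payload. */
struct demo_desc { uint32_t typecode, instance, length; };
struct demo_msr  { uint32_t index, _rsvd; uint64_t val; };

/* Toy save stream; aligned so the struct overlays below are safe. */
static uint8_t stream[256] __attribute__((aligned(8)));
static size_t cur;

int main(void)
{
    /* Reserve space for the descriptor plus the worst-case payload. */
    struct demo_desc *desc = (struct demo_desc *)&stream[cur];
    struct demo_msr *msrs;
    unsigned int count = 0, max = 4;

    cur += sizeof(*desc);
    msrs = (struct demo_msr *)&stream[cur];
    memset(desc, 0, sizeof(*desc) + max * sizeof(*msrs));

    /* Pretend only one of the candidate MSRs turned out non-zero. */
    msrs[count].index = 0x10;
    msrs[count++].val = 0x1234;

    if ( count )
    {
        /* Shrink the record length to the space actually used. */
        desc->length = count * sizeof(*msrs);
        cur += desc->length;
    }
    else
        /* Nothing recorded: rewind and drop the descriptor again. */
        cur -= sizeof(*desc);

    printf("bytes used in the stream: %zu\n", cur);

    return 0;
}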
---
 xen/arch/x86/hvm/hvm.c | 106 +++++++++++++++++++++++++++----------------------
 1 file changed, 59 insertions(+), 47 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5b0820e..7df8744 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1364,69 +1364,81 @@ static const uint32_t msrs_to_send[] = {
 };
 static unsigned int __read_mostly msr_count_max = ARRAY_SIZE(msrs_to_send);
 
-static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+static int hvm_save_cpu_msrs_one(struct vcpu *v, hvm_domain_context_t *h)
 {
-    struct vcpu *v;
+    struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
+    struct hvm_msr *ctxt;
+    unsigned int i;
+    int err;
 
-    for_each_vcpu ( d, v )
+    err = _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
+                         HVM_CPU_MSR_SIZE(msr_count_max));
+    if ( err )
+        return err;
+    ctxt = (struct hvm_msr *)&h->data[h->cur];
+    ctxt->count = 0;
+
+    for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
     {
-        struct hvm_save_descriptor *desc = _p(&h->data[h->cur]);
-        struct hvm_msr *ctxt;
-        unsigned int i;
+        uint64_t val;
+        int rc = guest_rdmsr(v, msrs_to_send[i], &val);
 
-        if ( _hvm_init_entry(h, CPU_MSR_CODE, v->vcpu_id,
-                             HVM_CPU_MSR_SIZE(msr_count_max)) )
-            return 1;
-        ctxt = (struct hvm_msr *)&h->data[h->cur];
-        ctxt->count = 0;
+        /*
+         * It is the programmers responsibility to ensure that
+         * msrs_to_send[] contain generally-read/write MSRs.
+         * X86EMUL_EXCEPTION here implies a missing feature, and that the
+         * guest doesn't have access to the MSR.
+         */
+        if ( rc == X86EMUL_EXCEPTION )
+            continue;
 
-        for ( i = 0; i < ARRAY_SIZE(msrs_to_send); ++i )
+        if ( rc != X86EMUL_OKAY )
         {
-            uint64_t val;
-            int rc = guest_rdmsr(v, msrs_to_send[i], &val);
+            ASSERT_UNREACHABLE();
+            return -ENXIO;
+        }
 
-            /*
-             * It is the programmers responsibility to ensure that
-             * msrs_to_send[] contain generally-read/write MSRs.
-             * X86EMUL_EXCEPTION here implies a missing feature, and that the
-             * guest doesn't have access to the MSR.
-             */
-            if ( rc == X86EMUL_EXCEPTION )
-                continue;
+        if ( !val )
+            continue; /* Skip empty MSRs. */
 
-            if ( rc != X86EMUL_OKAY )
-            {
-                ASSERT_UNREACHABLE();
-                return -ENXIO;
-            }
+        ctxt->msr[ctxt->count].index = msrs_to_send[i];
+        ctxt->msr[ctxt->count++].val = val;
+    }
 
-            if ( !val )
-                continue; /* Skip empty MSRs. */
+    if ( hvm_funcs.save_msr )
+        hvm_funcs.save_msr(v, ctxt);
 
-            ctxt->msr[ctxt->count].index = msrs_to_send[i];
-            ctxt->msr[ctxt->count++].val = val;
-        }
+    ASSERT(ctxt->count <= msr_count_max);
 
-        if ( hvm_funcs.save_msr )
-            hvm_funcs.save_msr(v, ctxt);
+    for ( i = 0; i < ctxt->count; ++i )
+        ctxt->msr[i]._rsvd = 0;
 
-        ASSERT(ctxt->count <= msr_count_max);
+    if ( ctxt->count )
+    {
+        /* Rewrite length to indicate how much space we actually used. */
+        desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
+        h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
+    }
+    else
+        /* or rewind and remove the descriptor from the stream. */
+        h->cur -= sizeof(struct hvm_save_descriptor);
 
-        for ( i = 0; i < ctxt->count; ++i )
-            ctxt->msr[i]._rsvd = 0;
+    return 0;
+}
 
-        if ( ctxt->count )
-        {
-            /* Rewrite length to indicate how much space we actually used. */
-            desc->length = HVM_CPU_MSR_SIZE(ctxt->count);
-            h->cur += HVM_CPU_MSR_SIZE(ctxt->count);
-        }
-        else
-            /* or rewind and remove the descriptor from the stream. */
-            h->cur -= sizeof(struct hvm_save_descriptor);
+static int hvm_save_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
+{
+    struct vcpu *v;
+    int err = 0;
+
+    for_each_vcpu ( d, v )
+    {
+        err = hvm_save_cpu_msrs_one(v, h);
+        if ( err )
+            break;
     }
 
-    return 0;
+    return err;
 }
 
 static int hvm_load_cpu_msrs(struct domain *d, hvm_domain_context_t *h)
-- 
2.7.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

