
[PATCH] x86/hvm: Propagate real error information up through hvm_load()


  • To: Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • Date: Mon, 19 Jul 2021 12:14:49 +0100
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Jan Beulich <JBeulich@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Igor Druzhinin <igor.druzhinin@xxxxxxxxxx>
  • Delivery-date: Mon, 19 Jul 2021 11:15:06 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

hvm_load() is currently a mix of -errno and -1 style error handling, which
aliases -EPERM.  This leads to the following confusing diagnostics:

From userspace:
  xc: info: Restoring domain
  xc: error: Unable to restore HVM context (1 = Operation not permitted): Internal error
  xc: error: Restore failed (1 = Operation not permitted): Internal error
  xc_domain_restore: [1] Restore failed (1 = Operation not permitted)

From Xen:
  (XEN) HVM10.0 restore: inconsistent xsave state (feat=0x2ff accum=0x21f xcr0=0x7 bv=0x3 err=-22)
  (XEN) HVM10 restore: failed to load entry 16/0

The actual error was a bad backport, but the -EINVAL got converted to -EPERM
on the way out of the hypercall.
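
As a standalone illustration (not part of the patch; just plain C against the
standard errno values), this is why a bare -1 reads back as "Operation not
permitted" in the toolstack:

  /* Illustrative sketch only: EPERM is 1, so "return -1" is
   * indistinguishable from "return -EPERM" once the caller decodes the
   * result as -errno. */
  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  static int old_style_handler(void)
  {
      return -1;                  /* old "generic failure" convention */
  }

  int main(void)
  {
      int rc = old_style_handler();

      /* An -errno-aware caller reports this as EPERM. */
      printf("(%d = %s)\n", -rc, strerror(-rc));
      return 0;
  }

which prints "(1 = Operation not permitted)", matching the xc messages above.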

The overwhelming majority of *_load() handlers already use -errno consistently.
Fix up the rest to be consistent, and fix a few other errors noticed along the
way.

 * Failures of hvm_load_entry() indicate a truncated record or other bad data
   size.  Use -ENODATA.
 * Don't use {g,}dprintk().  Omitting diagnostics in release builds is rude,
   and almost everything uses unconditional printk()'s.
 * Switch some errors for more appropriate ones.

Reported-by: Igor Druzhinin <igor.druzhinin@xxxxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Igor Druzhinin <igor.druzhinin@xxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c |  6 +++---
 xen/arch/x86/emul-i8254.c      |  9 +++++----
 xen/arch/x86/hvm/irq.c         |  6 +++---
 xen/arch/x86/hvm/save.c        | 25 ++++++++++++++-----------
 xen/arch/x86/hvm/vioapic.c     |  5 ++++-
 xen/arch/x86/hvm/vpic.c        |  2 +-
 6 files changed, 30 insertions(+), 23 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index b1df9e9efd66..eb6434a3ba20 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -82,11 +82,11 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
 
     if ( ctxt->caps & ~guest_mcg_cap & ~MCG_CAP_COUNT & ~MCG_CTL_P )
     {
-        dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
-                " %#" PRIx64 " for %pv (supported: %#Lx)\n",
+        printk(XENLOG_G_ERR
+               "%s restore: unsupported MCA capabilities %#"PRIx64" for %pv 
(supported: %#Lx)\n",
                 is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
                 v, guest_mcg_cap & ~MCG_CAP_COUNT);
-        return -EPERM;
+        return -EINVAL;
     }
 
     v->arch.vmce.mcg_cap = ctxt->caps;
diff --git a/xen/arch/x86/emul-i8254.c b/xen/arch/x86/emul-i8254.c
index 73be4188ad41..83d7156799c8 100644
--- a/xen/arch/x86/emul-i8254.c
+++ b/xen/arch/x86/emul-i8254.c
@@ -412,7 +412,7 @@ static int pit_save(struct vcpu *v, hvm_domain_context_t *h)
 static int pit_load(struct domain *d, hvm_domain_context_t *h)
 {
     PITState *pit = domain_vpit(d);
-    int i;
+    int i, rc = 0;
 
     if ( !has_vpit(d) )
         return -ENODEV;
@@ -421,8 +421,8 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
 
     if ( hvm_load_entry(PIT, h, &pit->hw) )
     {
-        spin_unlock(&pit->lock);
-        return 1;
+        rc = -ENODATA;
+        goto out;
     }
     
     /*
@@ -434,9 +434,10 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
     for ( i = 0; i < 3; i++ )
         pit_load_count(pit, i, pit->hw.channels[i].count);
 
+ out:
     spin_unlock(&pit->lock);
 
-    return 0;
+    return rc;
 }
 
 HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 38ac5fb6c7c2..52aae4565f0c 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -773,9 +773,9 @@ static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
     for ( link = 0; link < 4; link++ )
         if ( hvm_irq->pci_link.route[link] > 15 )
         {
-            gdprintk(XENLOG_ERR, 
-                     "HVM restore: PCI-ISA link %u out of range (%u)\n",
-                     link, hvm_irq->pci_link.route[link]);
+            printk(XENLOG_G_ERR
+                   "HVM restore: PCI-ISA link %u out of range (%u)\n",
+                   link, hvm_irq->pci_link.route[link]);
             return -EINVAL;
         }
 
diff --git a/xen/arch/x86/hvm/save.c b/xen/arch/x86/hvm/save.c
index 584620985bf5..86c82cbd7456 100644
--- a/xen/arch/x86/hvm/save.c
+++ b/xen/arch/x86/hvm/save.c
@@ -51,14 +51,14 @@ int arch_hvm_load(struct domain *d, struct hvm_save_header *hdr)
     {
         printk(XENLOG_G_ERR "HVM%d restore: bad magic number %#"PRIx32"\n",
                d->domain_id, hdr->magic);
-        return -1;
+        return -EINVAL;
     }
 
     if ( hdr->version != HVM_FILE_VERSION )
     {
         printk(XENLOG_G_ERR "HVM%d restore: unsupported version %u\n",
                d->domain_id, hdr->version);
-        return -1;
+        return -EINVAL;
     }
 
     cpuid(1, &eax, &ebx, &ecx, &edx);
@@ -294,16 +294,18 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h)
     struct hvm_save_descriptor *desc;
     hvm_load_handler handler;
     struct vcpu *v;
+    int rc;
 
     if ( d->is_dying )
         return -EINVAL;
 
     /* Read the save header, which must be first */
     if ( hvm_load_entry(HEADER, h, &hdr) != 0 )
-        return -1;
+        return -ENODATA;
 
-    if ( arch_hvm_load(d, &hdr) )
-        return -1;
+    rc = arch_hvm_load(d, &hdr);
+    if ( rc )
+        return rc;
 
     /* Down all the vcpus: we only re-enable the ones that had state saved. */
     for_each_vcpu(d, v)
@@ -318,7 +320,7 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h)
             printk(XENLOG_G_ERR
                    "HVM%d restore: save did not end with a null entry\n",
                    d->domain_id);
-            return -1;
+            return -ENODATA;
         }
 
         /* Read the typecode of the next entry  and check for the end-marker */
@@ -332,17 +334,18 @@ int hvm_load(struct domain *d, hvm_domain_context_t *h)
         {
             printk(XENLOG_G_ERR "HVM%d restore: unknown entry typecode %u\n",
                    d->domain_id, desc->typecode);
-            return -1;
+            return -EINVAL;
         }
 
         /* Load the entry */
         printk(XENLOG_G_INFO "HVM%d restore: %s %"PRIu16"\n", d->domain_id,
                hvm_sr_handlers[desc->typecode].name, desc->instance);
-        if ( handler(d, h) != 0 )
+        rc = handler(d, h);
+        if ( rc )
         {
-            printk(XENLOG_G_ERR "HVM%d restore: failed to load entry %u/%u\n",
-                   d->domain_id, desc->typecode, desc->instance);
-            return -1;
+            printk(XENLOG_G_ERR "HVM%d restore: failed to load entry %u/%u rc 
%d\n",
+                   d->domain_id, desc->typecode, desc->instance, rc);
+            return rc;
         }
         process_pending_softirqs();
     }
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 87370dd4172b..553c0f76eff8 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -620,7 +620,10 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
          d->arch.hvm.nr_vioapics != 1 )
         return -EOPNOTSUPP;
 
-    return hvm_load_entry(IOAPIC, h, &s->domU);
+    if ( hvm_load_entry(IOAPIC, h, &s->domU) )
+        return -ENODATA;
+
+    return 0;
 }
 
 HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index f465b7f9979a..af988a868c8a 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -430,7 +430,7 @@ static int vpic_load(struct domain *d, hvm_domain_context_t *h)
 
     /* Which PIC is this? */
     if ( inst > 1 )
-        return -EINVAL;
+        return -ENOENT;
     s = &d->arch.hvm.vpic[inst];
 
     /* Load the state */
-- 
2.11.0