
[Xen-devel] [RFC PATCH 1/3] xen/hvm-save: Refactor HVM_REGISTER_SAVE_RESTORE



The instance id in a save record served two purposes: for a PER_VCPU record it
was the VCPU id, while for a PER_DOM record it was just an index.

As the number of instances needs to be stored to help fix hvm_save_one() later
in this series, refactor HVM_REGISTER_SAVE_RESTORE to simplify the interface
and to prevent the buggy case of registering a PER_VCPU record with multiple
instances.  The 'kind' of a record can now be inferred from its number of
instances.
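
For illustration, the inference boils down to the helper introduced in
xen/common/hvm/save.c below:

    /* A handler registered with num == 0 is per-vcpu; num > 0 means a
     * per-domain record with that many instances. */
    #define is_per_vcpu_handler(_h) ((_h).num == 0)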

There are now HVM_REGISTER_SAVE_RESTORE_PER_DOM() and
HVM_REGISTER_SAVE_RESTORE_PER_VCPU(), which both take fewer arguments, and
only the PER_DOM() variant allows specifying a number of instances.
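
For example (mirroring the hunks below), a two-instance per-domain record and
a per-vcpu record are now registered as:

    HVM_REGISTER_SAVE_RESTORE_PER_DOM(PIC, vpic_save, vpic_load, 2);
    HVM_REGISTER_SAVE_RESTORE_PER_VCPU(LAPIC, lapic_save_hidden, lapic_load_hidden);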

There is no observable change as a result of this patch.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CC: Keir Fraser <keir@xxxxxxx>
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Don Slutz <dslutz@xxxxxxxxxxx>
CC: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
 xen/arch/x86/cpu/mcheck/vmce.c |    4 ++--
 xen/arch/x86/hvm/hpet.c        |    2 +-
 xen/arch/x86/hvm/hvm.c         |   11 +++++------
 xen/arch/x86/hvm/i8254.c       |    2 +-
 xen/arch/x86/hvm/irq.c         |    9 +++------
 xen/arch/x86/hvm/mtrr.c        |    3 +--
 xen/arch/x86/hvm/pmtimer.c     |    3 +--
 xen/arch/x86/hvm/rtc.c         |    2 +-
 xen/arch/x86/hvm/vioapic.c     |    2 +-
 xen/arch/x86/hvm/viridian.c    |    8 ++++----
 xen/arch/x86/hvm/vlapic.c      |    6 ++----
 xen/arch/x86/hvm/vpic.c        |    2 +-
 xen/common/hvm/save.c          |   13 ++++++++-----
 xen/include/xen/hvm/save.h     |   35 ++++++++++++++++++++---------------
 14 files changed, 51 insertions(+), 51 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index f6c35db..a88368a 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -335,8 +335,8 @@ static int vmce_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return err ?: vmce_restore_vcpu(v, &ctxt);
 }
 
-HVM_REGISTER_SAVE_RESTORE(VMCE_VCPU, vmce_save_vcpu_ctxt,
-                          vmce_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(VMCE_VCPU, vmce_save_vcpu_ctxt,
+                                   vmce_load_vcpu_ctxt);
 
 /*
  * for Intel MCE, broadcast vMCE to all vcpus
diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 4324b52..fb2c098 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -569,7 +569,7 @@ static int hpet_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(HPET, hpet_save, hpet_load, 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(HPET, hpet_save, hpet_load, 1);
 
 void hpet_init(struct vcpu *v)
 {
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 69f7e74..eb21fc4 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -699,8 +699,8 @@ static int hvm_load_tsc_adjust(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(TSC_ADJUST, hvm_save_tsc_adjust,
-                          hvm_load_tsc_adjust, 1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(TSC_ADJUST, hvm_save_tsc_adjust,
+                                   hvm_load_tsc_adjust);
 
 static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
@@ -999,8 +999,7 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt,
-                          1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(CPU, hvm_save_cpu_ctxt, hvm_load_cpu_ctxt);
 
 #define HVM_CPU_XSAVE_SIZE(xcr0) (offsetof(struct hvm_hw_cpu_xsave, \
                                            save_area) + \
@@ -1136,9 +1135,9 @@ static int __init __hvm_register_CPU_XSAVE_save_and_restore(void)
                         "CPU_XSAVE",
                         hvm_save_cpu_xsave_states,
                         hvm_load_cpu_xsave_states,
+                        0,
                         HVM_CPU_XSAVE_SIZE(xfeature_mask) +
-                            sizeof(struct hvm_save_descriptor),
-                        HVMSR_PER_VCPU);
+                        sizeof(struct hvm_save_descriptor));
     return 0;
 }
 __initcall(__hvm_register_CPU_XSAVE_save_and_restore);
diff --git a/xen/arch/x86/hvm/i8254.c b/xen/arch/x86/hvm/i8254.c
index c0d6bc2..139812a 100644
--- a/xen/arch/x86/hvm/i8254.c
+++ b/xen/arch/x86/hvm/i8254.c
@@ -423,7 +423,7 @@ static int pit_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PIT, pit_save, pit_load, 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(PIT, pit_save, pit_load, 1);
 
 void pit_reset(struct domain *d)
 {
diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
index 677fbcd..04ce739 100644
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -658,9 +658,6 @@ static int irq_load_link(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PCI_IRQ, irq_save_pci, irq_load_pci,
-                          1, HVMSR_PER_DOM);
-HVM_REGISTER_SAVE_RESTORE(ISA_IRQ, irq_save_isa, irq_load_isa, 
-                          1, HVMSR_PER_DOM);
-HVM_REGISTER_SAVE_RESTORE(PCI_LINK, irq_save_link, irq_load_link,
-                          1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(PCI_IRQ, irq_save_pci, irq_load_pci, 1);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(ISA_IRQ, irq_save_isa, irq_load_isa, 1);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(PCI_LINK, irq_save_link, irq_load_link, 1);
diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 9937f5a..61c785c 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -677,8 +677,7 @@ static int hvm_load_mtrr_msr(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr,
-                          1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(MTRR, hvm_save_mtrr_msr, hvm_load_mtrr_msr);
 
 uint8_t epte_get_entry_emt(struct domain *d, unsigned long gfn, mfn_t mfn,
                            uint8_t *ipat, bool_t direct_mmio)
diff --git a/xen/arch/x86/hvm/pmtimer.c b/xen/arch/x86/hvm/pmtimer.c
index 01ae31d..282e8ee 100644
--- a/xen/arch/x86/hvm/pmtimer.c
+++ b/xen/arch/x86/hvm/pmtimer.c
@@ -292,8 +292,7 @@ static int pmtimer_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PMTIMER, pmtimer_save, pmtimer_load, 
-                          1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(PMTIMER, pmtimer_save, pmtimer_load, 1);
 
 int pmtimer_change_ioport(struct domain *d, unsigned int version)
 {
diff --git a/xen/arch/x86/hvm/rtc.c b/xen/arch/x86/hvm/rtc.c
index cdedefe..8d9d634 100644
--- a/xen/arch/x86/hvm/rtc.c
+++ b/xen/arch/x86/hvm/rtc.c
@@ -741,7 +741,7 @@ static int rtc_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(RTC, rtc_save, rtc_load, 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(RTC, rtc_save, rtc_load, 1);
 
 void rtc_reset(struct domain *d)
 {
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index d3c681b..7c75192 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -428,7 +428,7 @@ static int ioapic_load(struct domain *d, hvm_domain_context_t *h)
     return hvm_load_entry(IOAPIC, h, s);
 }
 
-HVM_REGISTER_SAVE_RESTORE(IOAPIC, ioapic_save, ioapic_load, 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(IOAPIC, ioapic_save, ioapic_load, 1);
 
 void vioapic_reset(struct domain *d)
 {
diff --git a/xen/arch/x86/hvm/viridian.c b/xen/arch/x86/hvm/viridian.c
index 2b86d66..0ba85b3 100644
--- a/xen/arch/x86/hvm/viridian.c
+++ b/xen/arch/x86/hvm/viridian.c
@@ -449,8 +449,8 @@ static int viridian_load_domain_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
-                          viridian_load_domain_ctxt, 1, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(VIRIDIAN_DOMAIN, viridian_save_domain_ctxt,
+                                  viridian_load_domain_ctxt, 1);
 
 static int viridian_save_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
 {
@@ -493,5 +493,5 @@ static int viridian_load_vcpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
-                          viridian_load_vcpu_ctxt, 1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(VIRIDIAN_VCPU, viridian_save_vcpu_ctxt,
+                                   viridian_load_vcpu_ctxt);
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index bc06010..b64b9ee 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -1220,10 +1220,8 @@ static int lapic_load_regs(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(LAPIC, lapic_save_hidden, lapic_load_hidden,
-                          1, HVMSR_PER_VCPU);
-HVM_REGISTER_SAVE_RESTORE(LAPIC_REGS, lapic_save_regs, lapic_load_regs,
-                          1, HVMSR_PER_VCPU);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(LAPIC, lapic_save_hidden, lapic_load_hidden);
+HVM_REGISTER_SAVE_RESTORE_PER_VCPU(LAPIC_REGS, lapic_save_regs, lapic_load_regs);
 
 int vlapic_init(struct vcpu *v)
 {
diff --git a/xen/arch/x86/hvm/vpic.c b/xen/arch/x86/hvm/vpic.c
index fea3f68..e882fe1 100644
--- a/xen/arch/x86/hvm/vpic.c
+++ b/xen/arch/x86/hvm/vpic.c
@@ -398,7 +398,7 @@ static int vpic_load(struct domain *d, hvm_domain_context_t *h)
     return 0;
 }
 
-HVM_REGISTER_SAVE_RESTORE(PIC, vpic_save, vpic_load, 2, HVMSR_PER_DOM);
+HVM_REGISTER_SAVE_RESTORE_PER_DOM(PIC, vpic_save, vpic_load, 2);
 
 void vpic_reset(struct domain *d)
 {
diff --git a/xen/common/hvm/save.c b/xen/common/hvm/save.c
index de76ada..2800c5b 100644
--- a/xen/common/hvm/save.c
+++ b/xen/common/hvm/save.c
@@ -36,15 +36,18 @@ static struct {
     hvm_load_handler load; 
     const char *name;
     size_t size;
-    int kind;
+    unsigned int num;
 } hvm_sr_handlers [HVM_SAVE_CODE_MAX + 1] = {{NULL, NULL, "<?>"},};
 
+#define is_per_vcpu_handler(_h) ((_h).num == 0)
+
 /* Init-time function to add entries to that list */
 void __init hvm_register_savevm(uint16_t typecode,
                                 const char *name,
                                 hvm_save_handler save_state,
                                 hvm_load_handler load_state,
-                                size_t size, int kind)
+                                unsigned int num,
+                                size_t size)
 {
     ASSERT(typecode <= HVM_SAVE_CODE_MAX);
     ASSERT(hvm_sr_handlers[typecode].save == NULL);
@@ -53,7 +56,7 @@ void __init hvm_register_savevm(uint16_t typecode,
     hvm_sr_handlers[typecode].load = load_state;
     hvm_sr_handlers[typecode].name = name;
     hvm_sr_handlers[typecode].size = size;
-    hvm_sr_handlers[typecode].kind = kind;
+    hvm_sr_handlers[typecode].num = num;
 }
 
 size_t hvm_save_size(struct domain *d) 
@@ -67,7 +70,7 @@ size_t hvm_save_size(struct domain *d)
 
     /* Plus space for each thing we will be saving */
     for ( i = 0; i <= HVM_SAVE_CODE_MAX; i++ ) 
-        if ( hvm_sr_handlers[i].kind == HVMSR_PER_VCPU )
+        if ( is_per_vcpu_handler(hvm_sr_handlers[i]) )
             for_each_vcpu(d, v)
                 sz += hvm_sr_handlers[i].size;
         else 
@@ -92,7 +95,7 @@ int hvm_save_one(struct domain *d, uint16_t typecode, uint16_t instance,
          || hvm_sr_handlers[typecode].save == NULL )
         return -EINVAL;
 
-    if ( hvm_sr_handlers[typecode].kind == HVMSR_PER_VCPU )
+    if ( is_per_vcpu_handler(hvm_sr_handlers[typecode]) )
         for_each_vcpu(d, v)
             sz += hvm_sr_handlers[typecode].size;
     else 
diff --git a/xen/include/xen/hvm/save.h b/xen/include/xen/hvm/save.h
index ae6f0bb..0e3ef13 100644
--- a/xen/include/xen/hvm/save.h
+++ b/xen/include/xen/hvm/save.h
@@ -94,30 +94,35 @@ typedef int (*hvm_save_handler) (struct domain *d,
 typedef int (*hvm_load_handler) (struct domain *d,
                                  hvm_domain_context_t *h);
 
-/* Init-time function to declare a pair of handlers for a type,
- * and the maximum buffer space needed to save this type of state */
+/* Init-time function to declare a pair of handlers for a type, and the
+ * maximum buffer space needed to save this type of state.  'num' of 0
+ * indicates a per-vcpu record, while 'num' of >0 indicates a per-domain
+ * record. */
 void hvm_register_savevm(uint16_t typecode,
                          const char *name, 
                          hvm_save_handler save_state,
                          hvm_load_handler load_state,
-                         size_t size, int kind);
-
-/* The space needed for saving can be per-domain or per-vcpu: */
-#define HVMSR_PER_DOM  0
-#define HVMSR_PER_VCPU 1
+                         unsigned int num, size_t size);
 
 /* Syntactic sugar around that function: specify the max number of
  * saves, and this calculates the size of buffer needed */
-#define HVM_REGISTER_SAVE_RESTORE(_x, _save, _load, _num, _k)             \
+#define HVM_REGISTER_SAVE_RESTORE_PER_DOM(_x, _save, _load, _num)         \
+static int __init __hvm_register_##_x##_save_and_restore(void)            \
+{                                                                         \
+    hvm_register_savevm(                                                  \
+        HVM_SAVE_CODE(_x), #_x, &_save, &_load, _num,                     \
+        (_num) * (HVM_SAVE_LENGTH(_x)                                     \
+                  + sizeof (struct hvm_save_descriptor)));                \
+    return 0;                                                             \
+}                                                                         \
+__initcall(__hvm_register_##_x##_save_and_restore);
+
+#define HVM_REGISTER_SAVE_RESTORE_PER_VCPU(_x, _save, _load)              \
 static int __init __hvm_register_##_x##_save_and_restore(void)            \
 {                                                                         \
-    hvm_register_savevm(HVM_SAVE_CODE(_x),                                \
-                        #_x,                                              \
-                        &_save,                                           \
-                        &_load,                                           \
-                        (_num) * (HVM_SAVE_LENGTH(_x)                     \
-                                  + sizeof (struct hvm_save_descriptor)), \
-                        _k);                                              \
+    hvm_register_savevm(                                                  \
+        HVM_SAVE_CODE(_x), #_x, &_save, &_load, 0,                        \
+        HVM_SAVE_LENGTH(_x) + sizeof (struct hvm_save_descriptor));       \
     return 0;                                                             \
 }                                                                         \
 __initcall(__hvm_register_##_x##_save_and_restore);
-- 
1.7.10.4

