[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v10 03/11] viridian: use stack variables for viridian_vcpu and viridian_domain...



...where there is more than one dereference inside a function.

This shortens the code and makes it more readable. No functional change.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: "Roger Pau Monné" <roger.pau@xxxxxxxxxx>

v4:
 - New in v4
---
 xen/arch/x86/hvm/viridian/synic.c    | 49 ++++++++++++++++------------
 xen/arch/x86/hvm/viridian/time.c     | 27 ++++++++-------
 xen/arch/x86/hvm/viridian/viridian.c | 47 +++++++++++++-------------
 3 files changed, 69 insertions(+), 54 deletions(-)

diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index 28eda7798c..f3d9f7ae74 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -30,7 +30,8 @@ typedef union _HV_VP_ASSIST_PAGE
 
 void viridian_apic_assist_set(const struct vcpu *v)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    HV_VP_ASSIST_PAGE *ptr = vv->vp_assist.ptr;
 
     if ( !ptr )
         return;
@@ -40,25 +41,25 @@ void viridian_apic_assist_set(const struct vcpu *v)
      * wrong and the VM will most likely hang so force a crash now
      * to make the problem clear.
      */
-    if ( v->arch.hvm.viridian->apic_assist_pending )
+    if ( vv->apic_assist_pending )
         domain_crash(v->domain);
 
-    v->arch.hvm.viridian->apic_assist_pending = true;
+    vv->apic_assist_pending = true;
     ptr->ApicAssist.no_eoi = 1;
 }
 
 bool viridian_apic_assist_completed(const struct vcpu *v)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    HV_VP_ASSIST_PAGE *ptr = vv->vp_assist.ptr;
 
     if ( !ptr )
         return false;
 
-    if ( v->arch.hvm.viridian->apic_assist_pending &&
-         !ptr->ApicAssist.no_eoi )
+    if ( vv->apic_assist_pending && !ptr->ApicAssist.no_eoi )
     {
         /* An EOI has been avoided */
-        v->arch.hvm.viridian->apic_assist_pending = false;
+        vv->apic_assist_pending = false;
         return true;
     }
 
@@ -67,17 +68,20 @@ bool viridian_apic_assist_completed(const struct vcpu *v)
 
 void viridian_apic_assist_clear(const struct vcpu *v)
 {
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian->vp_assist.ptr;
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    HV_VP_ASSIST_PAGE *ptr = vv->vp_assist.ptr;
 
     if ( !ptr )
         return;
 
     ptr->ApicAssist.no_eoi = 0;
-    v->arch.hvm.viridian->apic_assist_pending = false;
+    vv->apic_assist_pending = false;
 }
 
 int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 {
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+
     switch ( idx )
     {
     case HV_X64_MSR_EOI:
@@ -95,12 +99,11 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 
     case HV_X64_MSR_VP_ASSIST_PAGE:
         /* release any previous mapping */
-        viridian_unmap_guest_page(&v->arch.hvm.viridian->vp_assist);
-        v->arch.hvm.viridian->vp_assist.msr.raw = val;
-        viridian_dump_guest_page(v, "VP_ASSIST",
-                                 &v->arch.hvm.viridian->vp_assist);
-        if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
-            viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
+        viridian_unmap_guest_page(&vv->vp_assist);
+        vv->vp_assist.msr.raw = val;
+        viridian_dump_guest_page(v, "VP_ASSIST", &vv->vp_assist);
+        if ( vv->vp_assist.msr.fields.enabled )
+            viridian_map_guest_page(v, &vv->vp_assist);
         break;
 
     default:
@@ -146,18 +149,22 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt)
 {
-    ctxt->apic_assist_pending = v->arch.hvm.viridian->apic_assist_pending;
-    ctxt->vp_assist_msr = v->arch.hvm.viridian->vp_assist.msr.raw;
+    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+
+    ctxt->apic_assist_pending = vv->apic_assist_pending;
+    ctxt->vp_assist_msr = vv->vp_assist.msr.raw;
 }
 
 void viridian_synic_load_vcpu_ctxt(
     struct vcpu *v, const struct hvm_viridian_vcpu_context *ctxt)
 {
-    v->arch.hvm.viridian->vp_assist.msr.raw = ctxt->vp_assist_msr;
-    if ( v->arch.hvm.viridian->vp_assist.msr.fields.enabled )
-        viridian_map_guest_page(v, &v->arch.hvm.viridian->vp_assist);
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
+
+    vv->vp_assist.msr.raw = ctxt->vp_assist_msr;
+    if ( vv->vp_assist.msr.fields.enabled )
+        viridian_map_guest_page(v, &vv->vp_assist);
 
-    v->arch.hvm.viridian->apic_assist_pending = ctxt->apic_assist_pending;
+    vv->apic_assist_pending = ctxt->apic_assist_pending;
 }
 
 /*
diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c
index a7e94aadf0..76f9612001 100644
--- a/xen/arch/x86/hvm/viridian/time.c
+++ b/xen/arch/x86/hvm/viridian/time.c
@@ -141,6 +141,7 @@ void viridian_time_ref_count_thaw(const struct domain *d)
 int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 {
     struct domain *d = v->domain;
+    struct viridian_domain *vd = d->arch.hvm.viridian;
 
     switch ( idx )
     {
@@ -148,9 +149,9 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
-        d->arch.hvm.viridian->reference_tsc.raw = val;
+        vd->reference_tsc.raw = val;
         dump_reference_tsc(d);
-        if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
+        if ( vd->reference_tsc.fields.enabled )
             update_reference_tsc(d, true);
         break;
 
@@ -165,7 +166,8 @@ int viridian_time_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
 
 int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 {
-    struct domain *d = v->domain;
+    const struct domain *d = v->domain;
+    struct viridian_domain *vd = d->arch.hvm.viridian;
 
     switch ( idx )
     {
@@ -187,13 +189,12 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
         if ( !(viridian_feature_mask(d) & HVMPV_reference_tsc) )
             return X86EMUL_EXCEPTION;
 
-        *val = d->arch.hvm.viridian->reference_tsc.raw;
+        *val = vd->reference_tsc.raw;
         break;
 
     case HV_X64_MSR_TIME_REF_COUNT:
     {
-        struct viridian_time_ref_count *trc =
-            &d->arch.hvm.viridian->time_ref_count;
+        struct viridian_time_ref_count *trc = &vd->time_ref_count;
 
         if ( !(viridian_feature_mask(d) & HVMPV_time_ref_count) )
             return X86EMUL_EXCEPTION;
@@ -217,17 +218,21 @@ int viridian_time_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 void viridian_time_save_domain_ctxt(
     const struct domain *d, struct hvm_viridian_domain_context *ctxt)
 {
-    ctxt->time_ref_count = d->arch.hvm.viridian->time_ref_count.val;
-    ctxt->reference_tsc = d->arch.hvm.viridian->reference_tsc.raw;
+    const struct viridian_domain *vd = d->arch.hvm.viridian;
+
+    ctxt->time_ref_count = vd->time_ref_count.val;
+    ctxt->reference_tsc = vd->reference_tsc.raw;
 }
 
 void viridian_time_load_domain_ctxt(
     struct domain *d, const struct hvm_viridian_domain_context *ctxt)
 {
-    d->arch.hvm.viridian->time_ref_count.val = ctxt->time_ref_count;
-    d->arch.hvm.viridian->reference_tsc.raw = ctxt->reference_tsc;
+    struct viridian_domain *vd = d->arch.hvm.viridian;
+
+    vd->time_ref_count.val = ctxt->time_ref_count;
+    vd->reference_tsc.raw = ctxt->reference_tsc;
 
-    if ( d->arch.hvm.viridian->reference_tsc.fields.enabled )
+    if ( vd->reference_tsc.fields.enabled )
         update_reference_tsc(d, false);
 }
 
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 7839718ef4..710470fed7 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -122,6 +122,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
                            uint32_t subleaf, struct cpuid_leaf *res)
 {
     const struct domain *d = v->domain;
+    const struct viridian_domain *vd = d->arch.hvm.viridian;
 
     ASSERT(is_viridian_domain(d));
     ASSERT(leaf >= 0x40000000 && leaf < 0x40000100);
@@ -146,7 +147,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
          * Hypervisor information, but only if the guest has set its
          * own version number.
          */
-        if ( d->arch.hvm.viridian->guest_os_id.raw == 0 )
+        if ( vd->guest_os_id.raw == 0 )
             break;
         res->a = viridian_build;
         res->b = ((uint32_t)viridian_major << 16) | viridian_minor;
@@ -191,8 +192,7 @@ void cpuid_viridian_leaves(const struct vcpu *v, uint32_t leaf,
 
     case 4:
         /* Recommended hypercall usage. */
-        if ( (d->arch.hvm.viridian->guest_os_id.raw == 0) ||
-             (d->arch.hvm.viridian->guest_os_id.fields.os < 4) )
+        if ( vd->guest_os_id.raw == 0 || vd->guest_os_id.fields.os < 4 )
             break;
         res->a = CPUID4A_RELAX_TIMER_INT;
         if ( viridian_feature_mask(d) & HVMPV_hcall_remote_tlb_flush )
@@ -281,21 +281,23 @@ static void enable_hypercall_page(struct domain *d)
 
 int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
 {
+    struct viridian_vcpu *vv = v->arch.hvm.viridian;
     struct domain *d = v->domain;
+    struct viridian_domain *vd = d->arch.hvm.viridian;
 
     ASSERT(is_viridian_domain(d));
 
     switch ( idx )
     {
     case HV_X64_MSR_GUEST_OS_ID:
-        d->arch.hvm.viridian->guest_os_id.raw = val;
+        vd->guest_os_id.raw = val;
         dump_guest_os_id(d);
         break;
 
     case HV_X64_MSR_HYPERCALL:
-        d->arch.hvm.viridian->hypercall_gpa.raw = val;
+        vd->hypercall_gpa.raw = val;
         dump_hypercall(d);
-        if ( d->arch.hvm.viridian->hypercall_gpa.fields.enabled )
+        if ( vd->hypercall_gpa.fields.enabled )
             enable_hypercall_page(d);
         break;
 
@@ -317,10 +319,10 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
     case HV_X64_MSR_CRASH_P3:
     case HV_X64_MSR_CRASH_P4:
         BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
-                     ARRAY_SIZE(v->arch.hvm.viridian->crash_param));
+                     ARRAY_SIZE(vv->crash_param));
 
         idx -= HV_X64_MSR_CRASH_P0;
-        v->arch.hvm.viridian->crash_param[idx] = val;
+        vv->crash_param[idx] = val;
         break;
 
     case HV_X64_MSR_CRASH_CTL:
@@ -337,11 +339,8 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
         spin_unlock(&d->shutdown_lock);
 
         gprintk(XENLOG_WARNING, "VIRIDIAN CRASH: %lx %lx %lx %lx %lx\n",
-                v->arch.hvm.viridian->crash_param[0],
-                v->arch.hvm.viridian->crash_param[1],
-                v->arch.hvm.viridian->crash_param[2],
-                v->arch.hvm.viridian->crash_param[3],
-                v->arch.hvm.viridian->crash_param[4]);
+                vv->crash_param[0], vv->crash_param[1], vv->crash_param[2],
+                vv->crash_param[3], vv->crash_param[4]);
         break;
     }
 
@@ -357,18 +356,20 @@ int guest_wrmsr_viridian(struct vcpu *v, uint32_t idx, uint64_t val)
 
 int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
 {
-    struct domain *d = v->domain;
+    const struct viridian_vcpu *vv = v->arch.hvm.viridian;
+    const struct domain *d = v->domain;
+    const struct viridian_domain *vd = d->arch.hvm.viridian;
 
     ASSERT(is_viridian_domain(d));
 
     switch ( idx )
     {
     case HV_X64_MSR_GUEST_OS_ID:
-        *val = d->arch.hvm.viridian->guest_os_id.raw;
+        *val = vd->guest_os_id.raw;
         break;
 
     case HV_X64_MSR_HYPERCALL:
-        *val = d->arch.hvm.viridian->hypercall_gpa.raw;
+        *val = vd->hypercall_gpa.raw;
         break;
 
     case HV_X64_MSR_VP_INDEX:
@@ -393,10 +394,10 @@ int guest_rdmsr_viridian(const struct vcpu *v, uint32_t idx, uint64_t *val)
     case HV_X64_MSR_CRASH_P3:
     case HV_X64_MSR_CRASH_P4:
         BUILD_BUG_ON(HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0 >=
-                     ARRAY_SIZE(v->arch.hvm.viridian->crash_param));
+                     ARRAY_SIZE(vv->crash_param));
 
         idx -= HV_X64_MSR_CRASH_P0;
-        *val = v->arch.hvm.viridian->crash_param[idx];
+        *val = vv->crash_param[idx];
         break;
 
     case HV_X64_MSR_CRASH_CTL:
@@ -665,9 +666,10 @@ static int viridian_save_domain_ctxt(struct vcpu *v,
                                      hvm_domain_context_t *h)
 {
     const struct domain *d = v->domain;
+    const struct viridian_domain *vd = d->arch.hvm.viridian;
     struct hvm_viridian_domain_context ctxt = {
-        .hypercall_gpa = d->arch.hvm.viridian->hypercall_gpa.raw,
-        .guest_os_id = d->arch.hvm.viridian->guest_os_id.raw,
+        .hypercall_gpa = vd->hypercall_gpa.raw,
+        .guest_os_id = vd->guest_os_id.raw,
     };
 
     if ( !is_viridian_domain(d) )
@@ -681,13 +683,14 @@ static int viridian_save_domain_ctxt(struct vcpu *v,
 static int viridian_load_domain_ctxt(struct domain *d,
                                      hvm_domain_context_t *h)
 {
+    struct viridian_domain *vd = d->arch.hvm.viridian;
     struct hvm_viridian_domain_context ctxt;
 
     if ( hvm_load_entry_zeroextend(VIRIDIAN_DOMAIN, h, &ctxt) != 0 )
         return -EINVAL;
 
-    d->arch.hvm.viridian->hypercall_gpa.raw = ctxt.hypercall_gpa;
-    d->arch.hvm.viridian->guest_os_id.raw = ctxt.guest_os_id;
+    vd->hypercall_gpa.raw = ctxt.hypercall_gpa;
+    vd->guest_os_id.raw = ctxt.guest_os_id;
 
     viridian_time_load_domain_ctxt(d, &ctxt);
 
-- 
2.20.1


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.