[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 3/4] x86/svm: Clean up intinfo_t variables



The eventinj_t type name is poor because the type is also used for the IDT
vectoring field, not just for the event injection field.  Rename it to
intinfo_t, which is how the APM refers to the data.

Rearrange the union to drop the .fields infix, and rename bytes to the more
common raw.

While adjusting all call sites, fix up style issues and make use of structure
assignments where applicable.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wl@xxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/hvm/svm/intr.c        | 32 ++++++++----------
 xen/arch/x86/hvm/svm/nestedsvm.c   | 28 +++++++---------
 xen/arch/x86/hvm/svm/svm.c         | 68 ++++++++++++++++++--------------------
 xen/arch/x86/hvm/svm/svmdebug.c    | 12 +++----
 xen/include/asm-x86/hvm/svm/vmcb.h | 22 ++++++------
 5 files changed, 75 insertions(+), 87 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/intr.c b/xen/arch/x86/hvm/svm/intr.c
index ff755165cd..4eede5cc23 100644
--- a/xen/arch/x86/hvm/svm/intr.c
+++ b/xen/arch/x86/hvm/svm/intr.c
@@ -43,15 +43,13 @@ static void svm_inject_nmi(struct vcpu *v)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
     u32 general1_intercepts = vmcb_get_general1_intercepts(vmcb);
-    eventinj_t event;
 
-    event.bytes = 0;
-    event.fields.v = 1;
-    event.fields.type = X86_EVENTTYPE_NMI;
-    event.fields.vector = 2;
-
-    ASSERT(vmcb->eventinj.fields.v == 0);
-    vmcb->eventinj = event;
+    ASSERT(!vmcb->eventinj.v);
+    vmcb->eventinj = (intinfo_t){
+        .vector = 2,
+        .type = X86_EVENTTYPE_NMI,
+        .v = true,
+    };
 
     /*
      * SVM does not virtualise the NMI mask, so we emulate it by intercepting
@@ -64,15 +62,13 @@ static void svm_inject_nmi(struct vcpu *v)
 static void svm_inject_extint(struct vcpu *v, int vector)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
-    eventinj_t event;
-
-    event.bytes = 0;
-    event.fields.v = 1;
-    event.fields.type = X86_EVENTTYPE_EXT_INTR;
-    event.fields.vector = vector;
 
-    ASSERT(vmcb->eventinj.fields.v == 0);
-    vmcb->eventinj = event;
+    ASSERT(!vmcb->eventinj.v);
+    vmcb->eventinj = (intinfo_t){
+        .vector = vector,
+        .type = X86_EVENTTYPE_EXT_INTR,
+        .v = true,
+    };
 }
 
 static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
@@ -99,7 +95,7 @@ static void svm_enable_intr_window(struct vcpu *v, struct hvm_intack intack)
     }
 
     HVMTRACE_3D(INTR_WINDOW, intack.vector, intack.source,
-                vmcb->eventinj.fields.v?vmcb->eventinj.fields.vector:-1);
+                vmcb->eventinj.v ? vmcb->eventinj.vector : -1);
 
     /*
      * Create a dummy virtual interrupt to intercept as soon as the
@@ -197,7 +193,7 @@ void svm_intr_assist(void)
          *      have cleared the interrupt out of the IRR.
          * 2. The IRQ is masked.
          */
-        if ( unlikely(vmcb->eventinj.fields.v) || intblk )
+        if ( unlikely(vmcb->eventinj.v) || intblk )
         {
             svm_enable_intr_window(v, intack);
             return;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index fef124fb11..d279a50e5c 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -340,7 +340,7 @@ static int nsvm_vcpu_hostrestore(struct vcpu *v, struct cpu_user_regs *regs)
     /* Clear exitintinfo to prevent a fault loop of re-injecting
      * exceptions forever.
      */
-    n1vmcb->exitintinfo.bytes = 0;
+    n1vmcb->exitintinfo.raw = 0;
 
     /* Cleanbits */
     n1vmcb->cleanbits.bytes = 0;
@@ -806,13 +806,10 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
 
         switch (exitcode) {
         case VMEXIT_INTR:
-            if ( unlikely(ns_vmcb->eventinj.fields.v)
-                && nv->nv_vmentry_pending
-                && hvm_event_needs_reinjection(ns_vmcb->eventinj.fields.type,
-                    ns_vmcb->eventinj.fields.vector) )
-            {
-                ns_vmcb->exitintinfo.bytes = ns_vmcb->eventinj.bytes;
-            }
+            if ( unlikely(ns_vmcb->eventinj.v) && nv->nv_vmentry_pending &&
+                 hvm_event_needs_reinjection(ns_vmcb->eventinj.type,
+                                             ns_vmcb->eventinj.vector) )
+                ns_vmcb->exitintinfo = ns_vmcb->eventinj;
             break;
         case VMEXIT_EXCEPTION_PF:
             ns_vmcb->_cr2 = ns_vmcb->exitinfo2;
@@ -837,7 +834,7 @@ nsvm_vcpu_vmexit_inject(struct vcpu *v, struct cpu_user_regs *regs,
     }
 
     ns_vmcb->exitcode = exitcode;
-    ns_vmcb->eventinj.bytes = 0;
+    ns_vmcb->eventinj.raw = 0;
     return 0;
 }
 
@@ -1077,14 +1074,12 @@ nsvm_vmcb_prepare4vmexit(struct vcpu *v, struct cpu_user_regs *regs)
      * only happens on a VMRUN instruction intercept which has no valid
      * exitintinfo set.
      */
-    if ( unlikely(n2vmcb->eventinj.fields.v) &&
-         hvm_event_needs_reinjection(n2vmcb->eventinj.fields.type,
-                                     n2vmcb->eventinj.fields.vector) )
-    {
+    if ( unlikely(n2vmcb->eventinj.v) &&
+         hvm_event_needs_reinjection(n2vmcb->eventinj.type,
+                                     n2vmcb->eventinj.vector) )
         ns_vmcb->exitintinfo = n2vmcb->eventinj;
-    }
 
-    ns_vmcb->eventinj.bytes = 0;
+    ns_vmcb->eventinj.raw = 0;
 
     /* Nested paging mode */
     if (nestedhvm_paging_mode_hap(v)) {
@@ -1249,7 +1244,8 @@ enum hvm_intblk nsvm_intr_blocked(struct vcpu *v)
         if ( v->arch.hvm.hvm_io.io_req.state != STATE_IOREQ_NONE )
             return hvm_intblk_shadow;
 
-        if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.bytes != 0 ) {
+        if ( !nv->nv_vmexit_pending && n2vmcb->exitintinfo.v )
+        {
             /* Give the l2 guest a chance to finish the delivery of
              * the last injected interrupt or exception before we
              * emulate a VMEXIT (e.g. VMEXIT(INTR) ).
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index c5ac03b0b1..263ae03bfd 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -259,12 +259,12 @@ static int svm_vmcb_save(struct vcpu *v, struct hvm_hw_cpu *c)
     c->sysenter_esp = v->arch.hvm.svm.guest_sysenter_esp;
     c->sysenter_eip = v->arch.hvm.svm.guest_sysenter_eip;
 
-    if ( vmcb->eventinj.fields.v &&
-         hvm_event_needs_reinjection(vmcb->eventinj.fields.type,
-                                     vmcb->eventinj.fields.vector) )
+    if ( vmcb->eventinj.v &&
+         hvm_event_needs_reinjection(vmcb->eventinj.type,
+                                     vmcb->eventinj.vector) )
     {
-        c->pending_event = (uint32_t)vmcb->eventinj.bytes;
-        c->error_code = vmcb->eventinj.fields.errorcode;
+        c->pending_event = vmcb->eventinj.raw;
+        c->error_code = vmcb->eventinj.ec;
     }
 
     return 1;
@@ -339,11 +339,11 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
     {
         gdprintk(XENLOG_INFO, "Re-injecting %#"PRIx32", %#"PRIx32"\n",
                  c->pending_event, c->error_code);
-        vmcb->eventinj.bytes = c->pending_event;
-        vmcb->eventinj.fields.errorcode = c->error_code;
+        vmcb->eventinj.raw = c->pending_event;
+        vmcb->eventinj.ec = c->error_code;
     }
     else
-        vmcb->eventinj.bytes = 0;
+        vmcb->eventinj.raw = 0;
 
     vmcb->cleanbits.bytes = 0;
     paging_update_paging_modes(v);
@@ -1301,7 +1301,7 @@ static void svm_inject_event(const struct x86_event *event)
 {
     struct vcpu *curr = current;
     struct vmcb_struct *vmcb = curr->arch.hvm.svm.vmcb;
-    eventinj_t eventinj = vmcb->eventinj;
+    intinfo_t eventinj = vmcb->eventinj;
     struct x86_event _event = *event;
     struct cpu_user_regs *regs = guest_cpu_user_regs();
 
@@ -1342,18 +1342,15 @@ static void svm_inject_event(const struct x86_event *event)
         break;
     }
 
-    if ( unlikely(eventinj.fields.v) &&
-         (eventinj.fields.type == X86_EVENTTYPE_HW_EXCEPTION) )
+    if ( eventinj.v && (eventinj.type == X86_EVENTTYPE_HW_EXCEPTION) )
     {
         _event.vector = hvm_combine_hw_exceptions(
-            eventinj.fields.vector, _event.vector);
+            eventinj.vector, _event.vector);
         if ( _event.vector == TRAP_double_fault )
             _event.error_code = 0;
     }
 
-    eventinj.bytes = 0;
-    eventinj.fields.v = 1;
-    eventinj.fields.vector = _event.vector;
+    eventinj = (intinfo_t){ .vector = _event.vector, .v = true };
 
     /*
      * Refer to AMD Vol 2: System Programming, 15.20 Event Injection.
@@ -1373,7 +1370,7 @@ static void svm_inject_event(const struct x86_event *event)
             vmcb->nextrip = regs->rip + _event.insn_len;
         else
             regs->rip += _event.insn_len;
-        eventinj.fields.type = X86_EVENTTYPE_SW_INTERRUPT;
+        eventinj.type = X86_EVENTTYPE_SW_INTERRUPT;
         break;
 
     case X86_EVENTTYPE_PRI_SW_EXCEPTION: /* icebp */
@@ -1385,7 +1382,7 @@ static void svm_inject_event(const struct x86_event *event)
         regs->rip += _event.insn_len;
         if ( cpu_has_svm_nrips )
             vmcb->nextrip = regs->rip;
-        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+        eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
         break;
 
     case X86_EVENTTYPE_SW_EXCEPTION: /* int3, into */
@@ -1397,13 +1394,13 @@ static void svm_inject_event(const struct x86_event *event)
             vmcb->nextrip = regs->rip + _event.insn_len;
         else
             regs->rip += _event.insn_len;
-        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
+        eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
         break;
 
     default:
-        eventinj.fields.type = X86_EVENTTYPE_HW_EXCEPTION;
-        eventinj.fields.ev = (_event.error_code != X86_EVENT_NO_EC);
-        eventinj.fields.errorcode = _event.error_code;
+        eventinj.type = X86_EVENTTYPE_HW_EXCEPTION;
+        eventinj.ev = (_event.error_code != X86_EVENT_NO_EC);
+        eventinj.ec = _event.error_code;
         break;
     }
 
@@ -1417,8 +1414,7 @@ static void svm_inject_event(const struct x86_event *event)
         vmcb->nextrip = (uint32_t)vmcb->nextrip;
     }
 
-    ASSERT(!eventinj.fields.ev ||
-           eventinj.fields.errorcode == (uint16_t)eventinj.fields.errorcode);
+    ASSERT(!eventinj.ev || eventinj.ec == (uint16_t)eventinj.ec);
     vmcb->eventinj = eventinj;
 
     if ( _event.vector == TRAP_page_fault &&
@@ -1431,7 +1427,7 @@ static void svm_inject_event(const struct x86_event *event)
 
 static bool svm_event_pending(const struct vcpu *v)
 {
-    return v->arch.hvm.svm.vmcb->eventinj.fields.v;
+    return v->arch.hvm.svm.vmcb->eventinj.v;
 }
 
 static void svm_cpu_dead(unsigned int cpu)
@@ -2410,12 +2406,12 @@ static bool svm_get_pending_event(struct vcpu *v, struct x86_event *info)
 {
     const struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( vmcb->eventinj.fields.v )
+    if ( vmcb->eventinj.v )
         return false;
 
-    info->vector = vmcb->eventinj.fields.vector;
-    info->type = vmcb->eventinj.fields.type;
-    info->error_code = vmcb->eventinj.fields.errorcode;
+    info->vector = vmcb->eventinj.vector;
+    info->type = vmcb->eventinj.type;
+    info->error_code = vmcb->eventinj.ec;
 
     return true;
 }
@@ -2602,9 +2598,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
     vmcb->cleanbits.bytes = cpu_has_svm_cleanbits ? ~0u : 0u;
 
     /* Event delivery caused this intercept? Queue for redelivery. */
-    if ( unlikely(vmcb->exitintinfo.fields.v) &&
-         hvm_event_needs_reinjection(vmcb->exitintinfo.fields.type,
-                                     vmcb->exitintinfo.fields.vector) )
+    if ( unlikely(vmcb->exitintinfo.v) &&
+         hvm_event_needs_reinjection(vmcb->exitintinfo.type,
+                                     vmcb->exitintinfo.vector) )
         vmcb->eventinj = vmcb->exitintinfo;
 
     switch ( exit_reason )
@@ -2765,9 +2761,9 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
          * switches.
          */
         insn_len = -1;
-        if ( vmcb->exitintinfo.fields.v )
+        if ( vmcb->exitintinfo.v )
         {
-            switch ( vmcb->exitintinfo.fields.type )
+            switch ( vmcb->exitintinfo.type )
             {
                 /*
                  * #BP and #OF are from INT3/INTO respectively.  #DB from
@@ -2775,8 +2771,8 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                  * semantics.
                  */
             case X86_EVENTTYPE_HW_EXCEPTION:
-                if ( vmcb->exitintinfo.fields.vector == TRAP_int3 ||
-                     vmcb->exitintinfo.fields.vector == TRAP_overflow )
+                if ( vmcb->exitintinfo.vector == TRAP_int3 ||
+                     vmcb->exitintinfo.vector == TRAP_overflow )
                     break;
                 /* Fallthrough */
             case X86_EVENTTYPE_EXT_INTR:
@@ -2789,7 +2785,7 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
              * The common logic above will have forwarded the vectoring
              * information.  Undo this as we are going to emulate.
              */
-            vmcb->eventinj.bytes = 0;
+            vmcb->eventinj.raw = 0;
         }
 
         /*
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 4293d8dba5..26e4b9d7bb 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -55,11 +55,11 @@ void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
            vmcb->tlb_control, vmcb_get_vintr(vmcb).bytes,
            vmcb->interrupt_shadow);
     printk("eventinj %016"PRIx64", valid? %d, ec? %d, type %u, vector %#x\n",
-           vmcb->eventinj.bytes, vmcb->eventinj.fields.v,
-           vmcb->eventinj.fields.ev, vmcb->eventinj.fields.type,
-           vmcb->eventinj.fields.vector);
+           vmcb->eventinj.raw, vmcb->eventinj.v,
+           vmcb->eventinj.ev, vmcb->eventinj.type,
+           vmcb->eventinj.vector);
     printk("exitcode = %#"PRIx64" exitintinfo = %#"PRIx64"\n",
-           vmcb->exitcode, vmcb->exitintinfo.bytes);
+           vmcb->exitcode, vmcb->exitintinfo.raw);
     printk("exitinfo1 = %#"PRIx64" exitinfo2 = %#"PRIx64"\n",
            vmcb->exitinfo1, vmcb->exitinfo2);
     printk("np_enable = %#"PRIx64" guest_asid = %#x\n",
@@ -164,9 +164,9 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
        PRINTF("GENERAL2_INTERCEPT: VMRUN intercept bit is clear (%#"PRIx32")\n",
                vmcb_get_general2_intercepts(vmcb));
 
-    if ( vmcb->eventinj.fields.resvd1 )
+    if ( vmcb->eventinj.resvd1 )
         PRINTF("eventinj: MBZ bits are set (%#"PRIx64")\n",
-               vmcb->eventinj.bytes);
+               vmcb->eventinj.raw);
 
 #undef PRINTF
     return ret;
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index e37220edf2..fc67a88660 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -306,17 +306,17 @@ enum VMEXIT_EXITCODE
 
 typedef union
 {
-    u64 bytes;
     struct
     {
-        u64 vector:    8;
-        u64 type:      3;
-        u64 ev:        1;
-        u64 resvd1:   19;
-        u64 v:         1;
-        u64 errorcode:32;
-    } fields;
-} eventinj_t;
+        uint8_t  vector;
+        uint8_t  type:3;
+        bool     ev:1;
+        uint32_t resvd1:19;
+        bool     v:1;
+        uint32_t ec;
+    };
+    uint64_t raw;
+} intinfo_t;
 
 typedef union
 {
@@ -420,10 +420,10 @@ struct vmcb_struct {
     u64 exitcode;               /* offset 0x70 */
     u64 exitinfo1;              /* offset 0x78 */
     u64 exitinfo2;              /* offset 0x80 */
-    eventinj_t  exitintinfo;    /* offset 0x88 */
+    intinfo_t exitintinfo;      /* offset 0x88 */
     u64 _np_enable;             /* offset 0x90 - cleanbit 4 */
     u64 res08[2];
-    eventinj_t  eventinj;       /* offset 0xA8 */
+    intinfo_t eventinj;         /* offset 0xA8 */
     u64 _h_cr3;                 /* offset 0xB0 - cleanbit 4 */
     virt_ext_t virt_ext;        /* offset 0xB8 */
     vmcbcleanbits_t cleanbits;  /* offset 0xC0 */
-- 
2.11.0


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.