[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH 1/2] x86/svm: Alias the VMCB segment registers as an array



This allows svm_{get,set}_segment_register() to access the user segments by
array index, capitalising on the fact that x86_seg_* matches the hardware encoding.

While making this alteration, add some newlines for clarity, switch an int for
a bool, and make the functions fail safely in a release build, rather than
crashing Xen.

Bloat-o-meter reports some modest improvements:

  add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-130 (-130)
  function                                     old     new   delta
  svm_set_segment_register                     662     653      -9
  svm_get_segment_register                     409     288    -121

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>
CC: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
---
 xen/arch/x86/hvm/svm/svm.c         | 76 ++++++++++++++++++--------------------
 xen/include/asm-x86/hvm/svm/vmcb.h | 17 ++++++---
 2 files changed, 47 insertions(+), 46 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 244da12..0dc9442 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -634,43 +634,39 @@ static void svm_get_segment_register(struct vcpu *v, enum 
x86_segment seg,
 
     switch ( seg )
     {
-    case x86_seg_cs:
-        *reg = vmcb->cs;
-        break;
-    case x86_seg_ds:
-        *reg = vmcb->ds;
-        break;
-    case x86_seg_es:
-        *reg = vmcb->es;
-        break;
-    case x86_seg_fs:
-        svm_sync_vmcb(v);
-        *reg = vmcb->fs;
-        break;
-    case x86_seg_gs:
+    case x86_seg_fs ... x86_seg_gs:
         svm_sync_vmcb(v);
-        *reg = vmcb->gs;
-        break;
-    case x86_seg_ss:
-        *reg = vmcb->ss;
-        reg->dpl = vmcb_get_cpl(vmcb);
+
+        /* Fallthrough. */
+    case x86_seg_es ... x86_seg_ds:
+        *reg = vmcb->sreg[seg];
+
+        if ( seg == x86_seg_ss )
+            reg->dpl = vmcb_get_cpl(vmcb);
         break;
+
     case x86_seg_tr:
         svm_sync_vmcb(v);
         *reg = vmcb->tr;
         break;
+
     case x86_seg_gdtr:
         *reg = vmcb->gdtr;
         break;
+
     case x86_seg_idtr:
         *reg = vmcb->idtr;
         break;
+
     case x86_seg_ldtr:
         svm_sync_vmcb(v);
         *reg = vmcb->ldtr;
         break;
+
     default:
-        BUG();
+        ASSERT_UNREACHABLE();
+        domain_crash(v->domain);
+        *reg = (struct segment_register){};
     }
 }
 
@@ -678,7 +674,7 @@ static void svm_set_segment_register(struct vcpu *v, enum 
x86_segment seg,
                                      struct segment_register *reg)
 {
     struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-    int sync = 0;
+    bool sync = false;
 
     ASSERT((v == current) || !vcpu_runnable(v));
 
@@ -690,18 +686,23 @@ static void svm_set_segment_register(struct vcpu *v, enum 
x86_segment seg,
     case x86_seg_ss: /* cpl */
         vmcb->cleanbits.fields.seg = 0;
         break;
+
     case x86_seg_gdtr:
     case x86_seg_idtr:
         vmcb->cleanbits.fields.dt = 0;
         break;
+
     case x86_seg_fs:
     case x86_seg_gs:
     case x86_seg_tr:
     case x86_seg_ldtr:
         sync = (v == current);
         break;
+
     default:
-        break;
+        ASSERT_UNREACHABLE();
+        domain_crash(v->domain);
+        return;
     }
 
     if ( sync )
@@ -709,41 +710,36 @@ static void svm_set_segment_register(struct vcpu *v, enum 
x86_segment seg,
 
     switch ( seg )
     {
-    case x86_seg_cs:
-        vmcb->cs = *reg;
-        break;
-    case x86_seg_ds:
-        vmcb->ds = *reg;
-        break;
-    case x86_seg_es:
-        vmcb->es = *reg;
-        break;
-    case x86_seg_fs:
-        vmcb->fs = *reg;
-        break;
-    case x86_seg_gs:
-        vmcb->gs = *reg;
-        break;
     case x86_seg_ss:
-        vmcb->ss = *reg;
         vmcb_set_cpl(vmcb, reg->dpl);
+
+        /* Fallthrough */
+    case x86_seg_es ... x86_seg_cs:
+    case x86_seg_ds ... x86_seg_gs:
+        vmcb->sreg[seg] = *reg;
         break;
+
     case x86_seg_tr:
         vmcb->tr = *reg;
         break;
+
     case x86_seg_gdtr:
         vmcb->gdtr.base = reg->base;
         vmcb->gdtr.limit = reg->limit;
         break;
+
     case x86_seg_idtr:
         vmcb->idtr.base = reg->base;
         vmcb->idtr.limit = reg->limit;
         break;
+
     case x86_seg_ldtr:
         vmcb->ldtr = *reg;
         break;
-    default:
-        BUG();
+
+    case x86_seg_none:
+        ASSERT_UNREACHABLE();
+        break;
     }
 
     if ( sync )
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h 
b/xen/include/asm-x86/hvm/svm/vmcb.h
index fa0d3e2..ec22d91 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -435,12 +435,17 @@ struct vmcb_struct {
     u8  guest_ins[15];          /* offset 0xD1 */
     u64 res10a[100];            /* offset 0xE0 pad to save area */
 
-    struct segment_register es;  /* offset 0x400 - cleanbit 8 */
-    struct segment_register cs;  /* cleanbit 8 */
-    struct segment_register ss;  /* cleanbit 8 */
-    struct segment_register ds;  /* cleanbit 8 */
-    struct segment_register fs;
-    struct segment_register gs;
+    union {
+        struct segment_register sreg[6];
+        struct {
+            struct segment_register es;  /* offset 0x400 - cleanbit 8 */
+            struct segment_register cs;  /* cleanbit 8 */
+            struct segment_register ss;  /* cleanbit 8 */
+            struct segment_register ds;  /* cleanbit 8 */
+            struct segment_register fs;
+            struct segment_register gs;
+        };
+    };
     struct segment_register gdtr; /* cleanbit 7 */
     struct segment_register ldtr;
     struct segment_register idtr; /* cleanbit 7 */
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.