
[Xen-changelog] [xen master] x86/emul: Drop segment_attributes_t



commit 6c9abf0e8022807bb7d677570d0775659950ff1a
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Jun 5 17:19:27 2017 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Jul 25 19:56:35 2017 +0100

    x86/emul: Drop segment_attributes_t
    
    The amount of namespace resolution is unnecessarily large, as all code deals
    in terms of struct segment_register.  This removes the attr.fields part of all
    references, and alters attr.bytes to just attr.
    
    Three areas of code using initialisers for segment_register are tweaked to
    compile with older versions of GCC.  arch_set_info_hvm_guest() has its SEG()
    macros altered to use plain comma-based initialisation, while
    rm_{cs,ds}_attr and vm86_{ds,tr}_attr are simplified to plain numbers, which
    match their description in the manuals.
    
    No functional change.  (For some reason, the old rm_{cs,ds}_attr and
    vm86_{ds,tr}_attr cause GCC to create variables in .rodata, whereas the new
    code uses immediate operands.  As a result, vmx_{get,set}_segment_register()
    are slightly shorter.)
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
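
    As a concrete illustration of the substitution, a minimal stand-alone
    sketch (layouts copied from the x86_emulate.h hunk at the end of this
    patch; the old_/new_ prefixes and at_cpl3_*() helpers exist only for
    this example):

        #include <stdint.h>

        /* Old: a named union type, forcing attr.fields.dpl etc. at call sites. */
        typedef union segment_attributes {
            uint16_t bytes;
            struct {
                uint16_t type:4, s:1, dpl:2, p:1, avl:1, l:1, db:1, g:1, pad:4;
            } fields;
        } segment_attributes_t;

        struct old_segment_register {
            uint16_t sel;
            segment_attributes_t attr;
            uint32_t limit;
            uint64_t base;
        };

        /* New: anonymous union/struct members (C11), so both the raw attr
         * word and the individual bitfields are direct members. */
        struct new_segment_register {
            uint16_t sel;
            union {
                uint16_t attr;
                struct {
                    uint16_t type:4, s:1, dpl:2, p:1, avl:1, l:1, db:1, g:1, pad:4;
                };
            };
            uint32_t limit;
            uint64_t base;
        };

        /* Call sites shorten accordingly, e.g. the PFEC_user_mode checks: */
        static inline int at_cpl3_old(const struct old_segment_register *ss)
        {
            return ss->attr.fields.dpl == 3;
        }

        static inline int at_cpl3_new(const struct new_segment_register *ss)
        {
            return ss->dpl == 3;
        }
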
---
 tools/fuzz/x86_instruction_emulator/fuzz-emul.c |  10 +-
 tools/tests/x86_emulator/test_x86_emulator.c    |   2 +-
 xen/arch/x86/cpu/vpmu.c                         |   2 +-
 xen/arch/x86/hvm/domain.c                       |  43 ++++---
 xen/arch/x86/hvm/emulate.c                      |  20 +--
 xen/arch/x86/hvm/hvm.c                          | 154 ++++++++++++------------
 xen/arch/x86/hvm/svm/svm.c                      |  10 +-
 xen/arch/x86/hvm/svm/svmdebug.c                 |   4 +-
 xen/arch/x86/hvm/svm/vmcb.c                     |  16 +--
 xen/arch/x86/hvm/vmx/realmode.c                 |  10 +-
 xen/arch/x86/hvm/vmx/vmx.c                      |  41 +++----
 xen/arch/x86/mm/shadow/common.c                 |   6 +-
 xen/arch/x86/pv/emul-priv-op.c                  |  40 +++---
 xen/arch/x86/vm_event.c                         |   2 +-
 xen/arch/x86/x86_emulate/x86_emulate.c          |  55 +++++----
 xen/arch/x86/x86_emulate/x86_emulate.h          |  37 +++---
 16 files changed, 219 insertions(+), 233 deletions(-)
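
A note on the older-GCC accommodation visible in the domain.c hunks below:
once the attr union is anonymous, the old compound literal would become a
designated initialiser naming the anonymous member (e.g. ".attr = ar"
alongside ".base" and ".limit"), and the commit message indicates that older
GCC versions cannot compile that form.  The SEG() macros therefore switch to
positional initialisation in declaration order (sel, { attr }, limit, base).
A reduced sketch of the second, simpler macro as it appears in the patch:

    /* Equivalent to { .limit = (l), .attr = (a) } with sel and base zeroed;
     * the positional form is what keeps older GCC happy. */
    #define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 }

    struct segment_register ds = SEG(~0u, 0xc93); /* flat 32-bit data */
    struct segment_register tr = SEG(0x67, 0x8b); /* 64-bit TSS (busy) */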

diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index aadbb40..a2329f8 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -583,7 +583,7 @@ static bool in_longmode(struct x86_emulate_ctxt *ctxt)
     const struct fuzz_state *s = ctxt->data;
     const struct fuzz_corpus *c = s->corpus;
 
-    return long_mode_active(ctxt) && c->segments[x86_seg_cs].attr.fields.l;
+    return long_mode_active(ctxt) && c->segments[x86_seg_cs].l;
 }
 
 static void set_sizes(struct x86_emulate_ctxt *ctxt)
@@ -597,8 +597,8 @@ static void set_sizes(struct x86_emulate_ctxt *ctxt)
         ctxt->addr_size = ctxt->sp_size = 64;
     else
     {
-        ctxt->addr_size = c->segments[x86_seg_cs].attr.fields.db ? 32 : 16;
-        ctxt->sp_size   = c->segments[x86_seg_ss].attr.fields.db ? 32 : 16;
+        ctxt->addr_size = c->segments[x86_seg_cs].db ? 32 : 16;
+        ctxt->sp_size   = c->segments[x86_seg_ss].db ? 32 : 16;
     }
 }
 
@@ -741,8 +741,8 @@ static void sanitize_input(struct x86_emulate_ctxt *ctxt)
     /* EFLAGS.VM implies 16-bit mode */
     if ( regs->rflags & X86_EFLAGS_VM )
     {
-        c->segments[x86_seg_cs].attr.fields.db = 0;
-        c->segments[x86_seg_ss].attr.fields.db = 0;
+        c->segments[x86_seg_cs].db = 0;
+        c->segments[x86_seg_ss].db = 0;
     }
 }
 
diff --git a/tools/tests/x86_emulator/test_x86_emulator.c b/tools/tests/x86_emulator/test_x86_emulator.c
index 1955332..76665ab 100644
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -264,7 +264,7 @@ static int read_segment(
     if ( !is_x86_user_segment(seg) )
         return X86EMUL_UNHANDLEABLE;
     memset(reg, 0, sizeof(*reg));
-    reg->attr.fields.p = 1;
+    reg->p = 1;
     return X86EMUL_OKAY;
 }
 
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 21383d3..90954ca 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -304,7 +304,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
                 r->cs = seg.sel;
                 hvm_get_segment_register(sampled, x86_seg_ss, &seg);
                 r->ss = seg.sel;
-                r->cpl = seg.attr.fields.dpl;
+                r->cpl = seg.dpl;
                 if ( !(sampled->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE) )
                     *flags |= PMU_SAMPLE_REAL;
             }
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 293956c..7e11541 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -27,13 +27,13 @@
 static int check_segment(struct segment_register *reg, enum x86_segment seg)
 {
 
-    if ( reg->attr.fields.pad != 0 )
+    if ( reg->pad != 0 )
     {
         gprintk(XENLOG_ERR, "Segment attribute bits 12-15 are not zero\n");
         return -EINVAL;
     }
 
-    if ( reg->attr.bytes == 0 )
+    if ( reg->attr == 0 )
     {
         if ( seg != x86_seg_ds && seg != x86_seg_es )
         {
@@ -45,26 +45,26 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
 
     if ( seg == x86_seg_tr )
     {
-        if ( reg->attr.fields.s )
+        if ( reg->s )
         {
             gprintk(XENLOG_ERR, "Code or data segment provided for TR\n");
             return -EINVAL;
         }
 
-        if ( reg->attr.fields.type != SYS_DESC_tss_busy )
+        if ( reg->type != SYS_DESC_tss_busy )
         {
             gprintk(XENLOG_ERR, "Non-32-bit-TSS segment provided for TR\n");
             return -EINVAL;
         }
     }
-    else if ( !reg->attr.fields.s )
+    else if ( !reg->s )
     {
         gprintk(XENLOG_ERR,
                 "System segment provided for a code or data segment\n");
         return -EINVAL;
     }
 
-    if ( !reg->attr.fields.p )
+    if ( !reg->p )
     {
         gprintk(XENLOG_ERR, "Non-present segment provided\n");
         return -EINVAL;
@@ -73,7 +73,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
     switch ( seg )
     {
     case x86_seg_cs:
-        if ( !(reg->attr.fields.type & 0x8) )
+        if ( !(reg->type & 0x8) )
         {
             gprintk(XENLOG_ERR, "Non-code segment provided for CS\n");
             return -EINVAL;
@@ -81,7 +81,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
         break;
 
     case x86_seg_ss:
-        if ( (reg->attr.fields.type & 0x8) || !(reg->attr.fields.type & 0x2) )
+        if ( (reg->type & 0x8) || !(reg->type & 0x2) )
         {
             gprintk(XENLOG_ERR, "Non-writeable segment provided for SS\n");
             return -EINVAL;
@@ -90,7 +90,7 @@ static int check_segment(struct segment_register *reg, enum x86_segment seg)
 
     case x86_seg_ds:
     case x86_seg_es:
-        if ( (reg->attr.fields.type & 0x8) && !(reg->attr.fields.type & 0x2) )
+        if ( (reg->type & 0x8) && !(reg->type & 0x2) )
         {
             gprintk(XENLOG_ERR, "Non-readable segment provided for DS or ES\n");
             return -EINVAL;
@@ -136,12 +136,11 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
             return -EINVAL;
 
 #define SEG(s, r) ({                                                        \
-    s = (struct segment_register){ .base = (r)->s ## _base,                 \
-                                   .limit = (r)->s ## _limit,               \
-                                   .attr.bytes = (r)->s ## _ar };           \
+    s = (struct segment_register)                                           \
+        { 0, { (r)->s ## _ar }, (r)->s ## _limit, (r)->s ## _base };        \
     /* Set accessed / busy bit for present segments. */                     \
-    if ( s.attr.fields.p )                                                  \
-        s.attr.fields.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2);          \
+    if ( s.p )                                                              \
+        s.type |= (x86_seg_##s != x86_seg_tr ? 1 : 2);                      \
     check_segment(&s, x86_seg_ ## s); })
 
         rc = SEG(cs, regs);
@@ -156,7 +155,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
 
         /* Basic sanity checks. */
         limit = cs.limit;
-        if ( cs.attr.fields.g )
+        if ( cs.g )
             limit = (limit << 12) | 0xfff;
         if ( regs->eip > limit )
         {
@@ -165,24 +164,24 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
             return -EINVAL;
         }
 
-        if ( ss.attr.fields.dpl != cs.attr.fields.dpl )
+        if ( ss.dpl != cs.dpl )
         {
             gprintk(XENLOG_ERR, "SS.DPL (%u) is different than CS.DPL (%u)\n",
-                    ss.attr.fields.dpl, cs.attr.fields.dpl);
+                    ss.dpl, cs.dpl);
             return -EINVAL;
         }
 
-        if ( ds.attr.fields.p && ds.attr.fields.dpl > cs.attr.fields.dpl )
+        if ( ds.p && ds.dpl > cs.dpl )
         {
             gprintk(XENLOG_ERR, "DS.DPL (%u) is greater than CS.DPL (%u)\n",
-                    ds.attr.fields.dpl, cs.attr.fields.dpl);
+                    ds.dpl, cs.dpl);
             return -EINVAL;
         }
 
-        if ( es.attr.fields.p && es.attr.fields.dpl > cs.attr.fields.dpl )
+        if ( es.p && es.dpl > cs.dpl )
         {
             gprintk(XENLOG_ERR, "ES.DPL (%u) is greater than CS.DPL (%u)\n",
-                    es.attr.fields.dpl, cs.attr.fields.dpl);
+                    es.dpl, cs.dpl);
             return -EINVAL;
         }
 
@@ -260,7 +259,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx)
         v->arch.hvm_vcpu.guest_cr[4] = regs->cr4;
         v->arch.hvm_vcpu.guest_efer  = regs->efer;
 
-#define SEG(l, a) (struct segment_register){ .limit = (l), .attr.bytes = (a) }
+#define SEG(l, a) (struct segment_register){ 0, { a }, l, 0 }
         cs = SEG(~0u, 0xa9b); /* 64bit code segment. */
         ds = ss = es = SEG(~0u, 0xc93);
         tr = SEG(0x67, 0x8b); /* 64bit TSS (busy). */
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 495e312..99fc4ca 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -873,7 +873,7 @@ static int __hvmemul_read(
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
-    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_virtual_to_linear(
@@ -995,7 +995,7 @@ static int hvmemul_write(
 
     if ( is_x86_system_segment(seg) )
         pfec |= PFEC_implicit;
-    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_virtual_to_linear(
@@ -1172,7 +1172,7 @@ static int hvmemul_rep_ins(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_linear_to_phys(
@@ -1241,7 +1241,7 @@ static int hvmemul_rep_outs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
     rc = hvmemul_linear_to_phys(
@@ -1288,7 +1288,7 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
     if ( vio->mmio_access.read_access &&
@@ -1446,7 +1446,7 @@ static int hvmemul_rep_stos(
     {
         uint32_t pfec = PFEC_page_present | PFEC_write_access;
 
-        if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+        if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
             pfec |= PFEC_user_mode;
 
         rc = hvmemul_linear_to_phys(addr, &gpa, bytes_per_rep, reps, pfec,
@@ -2144,17 +2144,17 @@ void hvm_emulate_init_per_insn(
     hvmemul_ctxt->ctxt.lma = hvm_long_mode_active(curr);
 
     if ( hvmemul_ctxt->ctxt.lma &&
-         hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.l )
+         hvmemul_ctxt->seg_reg[x86_seg_cs].l )
         hvmemul_ctxt->ctxt.addr_size = hvmemul_ctxt->ctxt.sp_size = 64;
     else
     {
         hvmemul_ctxt->ctxt.addr_size =
-            hvmemul_ctxt->seg_reg[x86_seg_cs].attr.fields.db ? 32 : 16;
+            hvmemul_ctxt->seg_reg[x86_seg_cs].db ? 32 : 16;
         hvmemul_ctxt->ctxt.sp_size =
-            hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db ? 32 : 16;
+            hvmemul_ctxt->seg_reg[x86_seg_ss].db ? 32 : 16;
     }
 
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.dpl == 3 )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
         pfec |= PFEC_user_mode;
 
     hvmemul_ctxt->insn_buf_eip = hvmemul_ctxt->ctxt.regs->rip;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0b1aba7..1ec37bd 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -802,49 +802,49 @@ static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
         ctxt.cs_sel = seg.sel;
         ctxt.cs_limit = seg.limit;
         ctxt.cs_base = seg.base;
-        ctxt.cs_arbytes = seg.attr.bytes;
+        ctxt.cs_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_ds, &seg);
         ctxt.ds_sel = seg.sel;
         ctxt.ds_limit = seg.limit;
         ctxt.ds_base = seg.base;
-        ctxt.ds_arbytes = seg.attr.bytes;
+        ctxt.ds_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_es, &seg);
         ctxt.es_sel = seg.sel;
         ctxt.es_limit = seg.limit;
         ctxt.es_base = seg.base;
-        ctxt.es_arbytes = seg.attr.bytes;
+        ctxt.es_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_ss, &seg);
         ctxt.ss_sel = seg.sel;
         ctxt.ss_limit = seg.limit;
         ctxt.ss_base = seg.base;
-        ctxt.ss_arbytes = seg.attr.bytes;
+        ctxt.ss_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_fs, &seg);
         ctxt.fs_sel = seg.sel;
         ctxt.fs_limit = seg.limit;
         ctxt.fs_base = seg.base;
-        ctxt.fs_arbytes = seg.attr.bytes;
+        ctxt.fs_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_gs, &seg);
         ctxt.gs_sel = seg.sel;
         ctxt.gs_limit = seg.limit;
         ctxt.gs_base = seg.base;
-        ctxt.gs_arbytes = seg.attr.bytes;
+        ctxt.gs_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_tr, &seg);
         ctxt.tr_sel = seg.sel;
         ctxt.tr_limit = seg.limit;
         ctxt.tr_base = seg.base;
-        ctxt.tr_arbytes = seg.attr.bytes;
+        ctxt.tr_arbytes = seg.attr;
 
         hvm_get_segment_register(v, x86_seg_ldtr, &seg);
         ctxt.ldtr_sel = seg.sel;
         ctxt.ldtr_limit = seg.limit;
         ctxt.ldtr_base = seg.base;
-        ctxt.ldtr_arbytes = seg.attr.bytes;
+        ctxt.ldtr_arbytes = seg.attr;
 
         if ( v->fpu_initialised )
         {
@@ -1056,49 +1056,49 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
     seg.sel = ctxt.cs_sel;
     seg.limit = ctxt.cs_limit;
     seg.base = ctxt.cs_base;
-    seg.attr.bytes = ctxt.cs_arbytes;
+    seg.attr = ctxt.cs_arbytes;
     hvm_set_segment_register(v, x86_seg_cs, &seg);
 
     seg.sel = ctxt.ds_sel;
     seg.limit = ctxt.ds_limit;
     seg.base = ctxt.ds_base;
-    seg.attr.bytes = ctxt.ds_arbytes;
+    seg.attr = ctxt.ds_arbytes;
     hvm_set_segment_register(v, x86_seg_ds, &seg);
 
     seg.sel = ctxt.es_sel;
     seg.limit = ctxt.es_limit;
     seg.base = ctxt.es_base;
-    seg.attr.bytes = ctxt.es_arbytes;
+    seg.attr = ctxt.es_arbytes;
     hvm_set_segment_register(v, x86_seg_es, &seg);
 
     seg.sel = ctxt.ss_sel;
     seg.limit = ctxt.ss_limit;
     seg.base = ctxt.ss_base;
-    seg.attr.bytes = ctxt.ss_arbytes;
+    seg.attr = ctxt.ss_arbytes;
     hvm_set_segment_register(v, x86_seg_ss, &seg);
 
     seg.sel = ctxt.fs_sel;
     seg.limit = ctxt.fs_limit;
     seg.base = ctxt.fs_base;
-    seg.attr.bytes = ctxt.fs_arbytes;
+    seg.attr = ctxt.fs_arbytes;
     hvm_set_segment_register(v, x86_seg_fs, &seg);
 
     seg.sel = ctxt.gs_sel;
     seg.limit = ctxt.gs_limit;
     seg.base = ctxt.gs_base;
-    seg.attr.bytes = ctxt.gs_arbytes;
+    seg.attr = ctxt.gs_arbytes;
     hvm_set_segment_register(v, x86_seg_gs, &seg);
 
     seg.sel = ctxt.tr_sel;
     seg.limit = ctxt.tr_limit;
     seg.base = ctxt.tr_base;
-    seg.attr.bytes = ctxt.tr_arbytes;
+    seg.attr = ctxt.tr_arbytes;
     hvm_set_segment_register(v, x86_seg_tr, &seg);
 
     seg.sel = ctxt.ldtr_sel;
     seg.limit = ctxt.ldtr_limit;
     seg.base = ctxt.ldtr_base;
-    seg.attr.bytes = ctxt.ldtr_arbytes;
+    seg.attr = ctxt.ldtr_arbytes;
     hvm_set_segment_register(v, x86_seg_ldtr, &seg);
 
     /* Cover xsave-absent save file restoration on xsave-capable host. */
@@ -1961,9 +1961,9 @@ int hvm_set_efer(uint64_t value)
          * When LME becomes set, clobber %cs.L to keep the guest firmly in
          * compatibility mode until it reloads %cs itself.
          */
-        if ( cs.attr.fields.l )
+        if ( cs.l )
         {
-            cs.attr.fields.l = 0;
+            cs.l = 0;
             hvm_set_segment_register(v, x86_seg_cs, &cs);
         }
     }
@@ -2425,14 +2425,14 @@ bool_t hvm_virtual_to_linear_addr(
             goto out;
     }
     else if ( hvm_long_mode_active(curr) &&
-              (is_x86_system_segment(seg) || active_cs->attr.fields.l) )
+              (is_x86_system_segment(seg) || active_cs->l) )
     {
         /*
          * User segments are always treated as present.  System segment may
          * not be, and also incur limit checks.
          */
         if ( is_x86_system_segment(seg) &&
-             (!reg->attr.fields.p || (offset + bytes - !!bytes) > reg->limit) )
+             (!reg->p || (offset + bytes - !!bytes) > reg->limit) )
             goto out;
 
         /*
@@ -2460,20 +2460,20 @@ bool_t hvm_virtual_to_linear_addr(
         addr = (uint32_t)(addr + reg->base);
 
         /* Segment not valid for use (cooked meaning of .p)? */
-        if ( !reg->attr.fields.p )
+        if ( !reg->p )
             goto out;
 
         /* Read/write restrictions only exist for user segments. */
-        if ( reg->attr.fields.s )
+        if ( reg->s )
         {
             switch ( access_type )
             {
             case hvm_access_read:
-                if ( (reg->attr.fields.type & 0xa) == 0x8 )
+                if ( (reg->type & 0xa) == 0x8 )
                     goto out; /* execute-only code segment */
                 break;
             case hvm_access_write:
-                if ( (reg->attr.fields.type & 0xa) != 0x2 )
+                if ( (reg->type & 0xa) != 0x2 )
                     goto out; /* not a writable data segment */
                 break;
             default:
@@ -2484,10 +2484,10 @@ bool_t hvm_virtual_to_linear_addr(
         last_byte = (uint32_t)offset + bytes - !!bytes;
 
         /* Is this a grows-down data segment? Special limit check if so. */
-        if ( reg->attr.fields.s && (reg->attr.fields.type & 0xc) == 0x4 )
+        if ( reg->s && (reg->type & 0xc) == 0x4 )
         {
             /* Is upper limit 0xFFFF or 0xFFFFFFFF? */
-            if ( !reg->attr.fields.db )
+            if ( !reg->db )
                 last_byte = (uint16_t)last_byte;
 
             /* Check first byte and last byte against respective bounds. */
@@ -2683,7 +2683,7 @@ static int hvm_load_segment_selector(
         segr.sel = sel;
         segr.base = (uint32_t)sel << 4;
         segr.limit = 0xffffu;
-        segr.attr.bytes = 0xf3;
+        segr.attr = 0xf3;
         hvm_set_segment_register(v, seg, &segr);
         return 0;
     }
@@ -2707,7 +2707,7 @@ static int hvm_load_segment_selector(
         v, (sel & 4) ? x86_seg_ldtr : x86_seg_gdtr, &desctab);
 
     /* Segment not valid for use (cooked meaning of .p)? */
-    if ( !desctab.attr.fields.p )
+    if ( !desctab.p )
         goto fail;
 
     /* Check against descriptor table limit. */
@@ -2785,10 +2785,10 @@ static int hvm_load_segment_selector(
     segr.base = (((desc.b <<  0) & 0xff000000u) |
                  ((desc.b << 16) & 0x00ff0000u) |
                  ((desc.a >> 16) & 0x0000ffffu));
-    segr.attr.bytes = (((desc.b >>  8) & 0x00ffu) |
-                       ((desc.b >> 12) & 0x0f00u));
+    segr.attr = (((desc.b >>  8) & 0x00ffu) |
+                 ((desc.b >> 12) & 0x0f00u));
     segr.limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
-    if ( segr.attr.fields.g )
+    if ( segr.g )
         segr.limit = (segr.limit << 12) | 0xfffu;
     segr.sel = sel;
     hvm_set_segment_register(v, seg, &segr);
@@ -2886,13 +2886,13 @@ void hvm_task_switch(
     tr.base = (((tss_desc.b <<  0) & 0xff000000u) |
                ((tss_desc.b << 16) & 0x00ff0000u) |
                ((tss_desc.a >> 16) & 0x0000ffffu));
-    tr.attr.bytes = (((tss_desc.b >>  8) & 0x00ffu) |
-                     ((tss_desc.b >> 12) & 0x0f00u));
+    tr.attr = (((tss_desc.b >>  8) & 0x00ffu) |
+               ((tss_desc.b >> 12) & 0x0f00u));
     tr.limit = (tss_desc.b & 0x000f0000u) | (tss_desc.a & 0x0000ffffu);
-    if ( tr.attr.fields.g )
+    if ( tr.g )
         tr.limit = (tr.limit << 12) | 0xfffu;
 
-    if ( tr.attr.fields.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
+    if ( tr.type != ((taskswitch_reason == TSW_iret) ? 0xb : 0x9) )
     {
         hvm_inject_hw_exception(
             (taskswitch_reason == TSW_iret) ? TRAP_invalid_tss : TRAP_gp_fault,
@@ -2900,7 +2900,7 @@ void hvm_task_switch(
         goto out;
     }
 
-    if ( !tr.attr.fields.p )
+    if ( !tr.p )
     {
         hvm_inject_hw_exception(TRAP_no_segment, tss_sel & 0xfff8);
         goto out;
@@ -3018,7 +3018,7 @@ void hvm_task_switch(
             goto out;
     }
 
-    tr.attr.fields.type = 0xb; /* busy 32-bit tss */
+    tr.type = 0xb; /* busy 32-bit tss */
     hvm_set_segment_register(v, x86_seg_tr, &tr);
 
     v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_TS;
@@ -3038,9 +3038,9 @@ void hvm_task_switch(
         unsigned int opsz, sp;
 
         hvm_get_segment_register(v, x86_seg_cs, &cs);
-        opsz = cs.attr.fields.db ? 4 : 2;
+        opsz = cs.db ? 4 : 2;
         hvm_get_segment_register(v, x86_seg_ss, &segr);
-        if ( segr.attr.fields.db )
+        if ( segr.db )
             sp = regs->esp -= opsz;
         else
             sp = regs->sp -= opsz;
@@ -3660,7 +3660,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
     if ( opt_hvm_fep )
     {
         const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
-        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].attr.fields.dpl == 3)
+        uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
             ? PFEC_user_mode : 0;
         unsigned long addr;
         char sig[5]; /* ud2; .ascii "xen" */
@@ -3676,7 +3676,7 @@ void hvm_ud_intercept(struct cpu_user_regs *regs)
             regs->eflags &= ~X86_EFLAGS_RF;
 
             /* Zero the upper 32 bits of %rip if not in 64bit mode. */
-            if ( !(hvm_long_mode_active(cur) && cs->attr.fields.l) )
+            if ( !(hvm_long_mode_active(cur) && cs->l) )
                 regs->rip = regs->eip;
 
             add_taint(TAINT_HVM_FEP);
@@ -3828,25 +3828,25 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     reg.sel = cs;
     reg.base = (uint32_t)reg.sel << 4;
     reg.limit = 0xffff;
-    reg.attr.bytes = 0x09b;
+    reg.attr = 0x9b;
     hvm_set_segment_register(v, x86_seg_cs, &reg);
 
     reg.sel = reg.base = 0;
     reg.limit = 0xffff;
-    reg.attr.bytes = 0x093;
+    reg.attr = 0x93;
     hvm_set_segment_register(v, x86_seg_ds, &reg);
     hvm_set_segment_register(v, x86_seg_es, &reg);
     hvm_set_segment_register(v, x86_seg_fs, &reg);
     hvm_set_segment_register(v, x86_seg_gs, &reg);
     hvm_set_segment_register(v, x86_seg_ss, &reg);
 
-    reg.attr.bytes = 0x82; /* LDT */
+    reg.attr = 0x82; /* LDT */
     hvm_set_segment_register(v, x86_seg_ldtr, &reg);
 
-    reg.attr.bytes = 0x8b; /* 32-bit TSS (busy) */
+    reg.attr = 0x8b; /* 32-bit TSS (busy) */
     hvm_set_segment_register(v, x86_seg_tr, &reg);
 
-    reg.attr.bytes = 0;
+    reg.attr = 0;
     hvm_set_segment_register(v, x86_seg_gdtr, &reg);
     hvm_set_segment_register(v, x86_seg_idtr, &reg);
 
@@ -4787,8 +4787,8 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
     {
     case x86_seg_ss:
         /* SVM may retain %ss.DB when %ss is loaded with a NULL selector. */
-        if ( !reg->attr.fields.p )
-            reg->attr.fields.db = 0;
+        if ( !reg->p )
+            reg->db = 0;
         break;
 
     case x86_seg_tr:
@@ -4796,14 +4796,14 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
          * SVM doesn't track %tr.B. Architecturally, a loaded TSS segment will
          * always be busy.
          */
-        reg->attr.fields.type |= 0x2;
+        reg->type |= 0x2;
 
         /*
          * %cs and %tr are unconditionally present.  SVM ignores these present
          * bits and will happily run without them set.
          */
     case x86_seg_cs:
-        reg->attr.fields.p = 1;
+        reg->p = 1;
         break;
 
     case x86_seg_gdtr:
@@ -4812,21 +4812,21 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
          * Treat GDTR/IDTR as being present system segments.  This avoids them
          * needing special casing for segmentation checks.
          */
-        reg->attr.bytes = 0x80;
+        reg->attr = 0x80;
         break;
 
     default: /* Avoid triggering -Werror=switch */
         break;
     }
 
-    if ( reg->attr.fields.p )
+    if ( reg->p )
     {
         /*
          * For segments which are present/usable, cook the system flag.  SVM
          * ignores the S bit on all segments and will happily run with them in
          * any state.
          */
-        reg->attr.fields.s = is_x86_user_segment(seg);
+        reg->s = is_x86_user_segment(seg);
 
         /*
          * SVM discards %cs.G on #VMEXIT.  Other user segments do have .G
@@ -4836,14 +4836,14 @@ void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
          *
          * Unconditionally recalculate G.
          */
-        reg->attr.fields.g = !!(reg->limit >> 20);
+        reg->g = !!(reg->limit >> 20);
 
         /*
          * SVM doesn't track the Accessed flag.  It will always be set for
          * usable user segments loaded into the descriptor cache.
          */
         if ( is_x86_user_segment(seg) )
-            reg->attr.fields.type |= 0x1;
+            reg->type |= 0x1;
     }
 }
 
@@ -4851,25 +4851,25 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                               struct segment_register *reg)
 {
     /* Set G to match the limit field.  VT-x cares, while SVM doesn't. */
-    if ( reg->attr.fields.p )
-        reg->attr.fields.g = !!(reg->limit >> 20);
+    if ( reg->p )
+        reg->g = !!(reg->limit >> 20);
 
     switch ( seg )
     {
     case x86_seg_cs:
-        ASSERT(reg->attr.fields.p);                  /* Usable. */
-        ASSERT(reg->attr.fields.s);                  /* User segment. */
-        ASSERT(reg->attr.fields.type & 0x1);         /* Accessed. */
+        ASSERT(reg->p);                              /* Usable. */
+        ASSERT(reg->s);                              /* User segment. */
+        ASSERT(reg->type & 0x1);                     /* Accessed. */
         ASSERT((reg->base >> 32) == 0);              /* Upper bits clear. */
         break;
 
     case x86_seg_ss:
-        if ( reg->attr.fields.p )
+        if ( reg->p )
         {
-            ASSERT(reg->attr.fields.s);              /* User segment. */
-            ASSERT(!(reg->attr.fields.type & 0x8));  /* Data segment. */
-            ASSERT(reg->attr.fields.type & 0x2);     /* Writeable. */
-            ASSERT(reg->attr.fields.type & 0x1);     /* Accessed. */
+            ASSERT(reg->s);                          /* User segment. */
+            ASSERT(!(reg->type & 0x8));              /* Data segment. */
+            ASSERT(reg->type & 0x2);                 /* Writeable. */
+            ASSERT(reg->type & 0x1);                 /* Accessed. */
             ASSERT((reg->base >> 32) == 0);          /* Upper bits clear. */
         }
         break;
@@ -4878,14 +4878,14 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
     case x86_seg_es:
     case x86_seg_fs:
     case x86_seg_gs:
-        if ( reg->attr.fields.p )
+        if ( reg->p )
         {
-            ASSERT(reg->attr.fields.s);              /* User segment. */
+            ASSERT(reg->s);                          /* User segment. */
 
-            if ( reg->attr.fields.type & 0x8 )
-                ASSERT(reg->attr.fields.type & 0x2); /* Readable. */
+            if ( reg->type & 0x8 )
+                ASSERT(reg->type & 0x2);             /* Readable. */
 
-            ASSERT(reg->attr.fields.type & 0x1);     /* Accessed. */
+            ASSERT(reg->type & 0x1);                 /* Accessed. */
 
             if ( seg == x86_seg_fs || seg == x86_seg_gs )
                 ASSERT(is_canonical_address(reg->base));
@@ -4895,23 +4895,23 @@ void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
 
     case x86_seg_tr:
-        ASSERT(reg->attr.fields.p);                  /* Usable. */
-        ASSERT(!reg->attr.fields.s);                 /* System segment. */
+        ASSERT(reg->p);                              /* Usable. */
+        ASSERT(!reg->s);                             /* System segment. */
         ASSERT(!(reg->sel & 0x4));                   /* !TI. */
-        if ( reg->attr.fields.type == SYS_DESC_tss_busy )
+        if ( reg->type == SYS_DESC_tss_busy )
             ASSERT(is_canonical_address(reg->base));
-        else if ( reg->attr.fields.type == SYS_DESC_tss16_busy )
+        else if ( reg->type == SYS_DESC_tss16_busy )
             ASSERT((reg->base >> 32) == 0);
         else
             ASSERT(!"%tr typecheck failure");
         break;
 
     case x86_seg_ldtr:
-        if ( reg->attr.fields.p )
+        if ( reg->p )
         {
-            ASSERT(!reg->attr.fields.s);             /* System segment. */
+            ASSERT(!reg->s);                         /* System segment. */
             ASSERT(!(reg->sel & 0x4));               /* !TI. */
-            ASSERT(reg->attr.fields.type == SYS_DESC_ldt);
+            ASSERT(reg->type == SYS_DESC_ldt);
             ASSERT(is_canonical_address(reg->base));
         }
         break;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 402e815..244da12 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -526,9 +526,9 @@ static int svm_guest_x86_mode(struct vcpu *v)
         return 0;
     if ( unlikely(guest_cpu_user_regs()->eflags & X86_EFLAGS_VM) )
         return 1;
-    if ( hvm_long_mode_active(v) && likely(vmcb->cs.attr.fields.l) )
+    if ( hvm_long_mode_active(v) && likely(vmcb->cs.l) )
         return 8;
-    return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
+    return likely(vmcb->cs.db) ? 4 : 2;
 }
 
 void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
@@ -653,7 +653,7 @@ static void svm_get_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         *reg = vmcb->ss;
-        reg->attr.fields.dpl = vmcb_get_cpl(vmcb);
+        reg->dpl = vmcb_get_cpl(vmcb);
         break;
     case x86_seg_tr:
         svm_sync_vmcb(v);
@@ -726,7 +726,7 @@ static void svm_set_segment_register(struct vcpu *v, enum x86_segment seg,
         break;
     case x86_seg_ss:
         vmcb->ss = *reg;
-        vmcb_set_cpl(vmcb, reg->attr.fields.dpl);
+        vmcb_set_cpl(vmcb, reg->dpl);
         break;
     case x86_seg_tr:
         vmcb->tr = *reg;
@@ -1442,7 +1442,7 @@ static void svm_inject_event(const struct x86_event *event)
      * If injecting an event outside of 64bit mode, zero the upper bits of the
      * %eip and nextrip after the adjustments above.
      */
-    if ( !((vmcb_get_efer(vmcb) & EFER_LMA) && vmcb->cs.attr.fields.l) )
+    if ( !((vmcb_get_efer(vmcb) & EFER_LMA) && vmcb->cs.l) )
     {
         regs->rip = regs->eip;
         vmcb->nextrip = (uint32_t)vmcb->nextrip;
diff --git a/xen/arch/x86/hvm/svm/svmdebug.c b/xen/arch/x86/hvm/svm/svmdebug.c
index 4902824..89ef2db 100644
--- a/xen/arch/x86/hvm/svm/svmdebug.c
+++ b/xen/arch/x86/hvm/svm/svmdebug.c
@@ -24,7 +24,7 @@
 static void svm_dump_sel(const char *name, const struct segment_register *s)
 {
     printk("%s: %04x %04x %08x %016"PRIx64"\n",
-           name, s->sel, s->attr.bytes, s->limit, s->base);
+           name, s->sel, s->attr, s->limit, s->base);
 }
 
 void svm_vmcb_dump(const char *from, const struct vmcb_struct *vmcb)
@@ -147,7 +147,7 @@ bool svm_vmcb_isvalid(const char *from, const struct vmcb_struct *vmcb,
     }
 
     if ( (efer & EFER_LME) && (cr0 & X86_CR0_PG) && (cr4 & X86_CR4_PAE) &&
-         vmcb->cs.attr.fields.l && vmcb->cs.attr.fields.db )
+         vmcb->cs.l && vmcb->cs.db )
         PRINTF("EFER_LME, CR0.PG, CR4.PAE, CS.L and CS.D are all non-zero\n");
 
     if ( !(vmcb_get_general2_intercepts(vmcb) & GENERAL2_INTERCEPT_VMRUN) )
diff --git a/xen/arch/x86/hvm/svm/vmcb.c b/xen/arch/x86/hvm/svm/vmcb.c
index 2e67d8d..9493215 100644
--- a/xen/arch/x86/hvm/svm/vmcb.c
+++ b/xen/arch/x86/hvm/svm/vmcb.c
@@ -158,12 +158,12 @@ static int construct_vmcb(struct vcpu *v)
     vmcb->gs.base = 0;
 
     /* Guest segment AR bytes. */
-    vmcb->es.attr.bytes = 0xc93; /* read/write, accessed */
-    vmcb->ss.attr.bytes = 0xc93;
-    vmcb->ds.attr.bytes = 0xc93;
-    vmcb->fs.attr.bytes = 0xc93;
-    vmcb->gs.attr.bytes = 0xc93;
-    vmcb->cs.attr.bytes = 0xc9b; /* exec/read, accessed */
+    vmcb->es.attr = 0xc93; /* read/write, accessed */
+    vmcb->ss.attr = 0xc93;
+    vmcb->ds.attr = 0xc93;
+    vmcb->fs.attr = 0xc93;
+    vmcb->gs.attr = 0xc93;
+    vmcb->cs.attr = 0xc9b; /* exec/read, accessed */
 
     /* Guest IDT. */
     vmcb->idtr.base = 0;
@@ -177,10 +177,10 @@ static int construct_vmcb(struct vcpu *v)
     vmcb->ldtr.sel = 0;
     vmcb->ldtr.base = 0;
     vmcb->ldtr.limit = 0;
-    vmcb->ldtr.attr.bytes = 0;
+    vmcb->ldtr.attr = 0;
 
     /* Guest TSS. */
-    vmcb->tr.attr.bytes = 0x08b; /* 32-bit TSS (busy) */
+    vmcb->tr.attr = 0x08b; /* 32-bit TSS (busy) */
     vmcb->tr.base = 0;
     vmcb->tr.limit = 0xff;
 
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index 1996b1f..11bde58 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -70,7 +70,7 @@ static void realmode_deliver_exception(
     frame[2] = regs->flags & ~X86_EFLAGS_RF;
 
     /* We can't test hvmemul_ctxt->ctxt.sp_size: it may not be initialised. */
-    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].attr.fields.db )
+    if ( hvmemul_ctxt->seg_reg[x86_seg_ss].db )
         pstk = regs->esp -= 6;
     else
         pstk = regs->sp -= 6;
@@ -207,13 +207,13 @@ void vmx_realmode(struct cpu_user_regs *regs)
          * DS, ES, FS and GS the most uninvasive trick is to set DPL == RPL.
          */
         sreg = hvmemul_get_seg_reg(x86_seg_ds, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->dpl = sreg->sel & 3;
         sreg = hvmemul_get_seg_reg(x86_seg_es, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->dpl = sreg->sel & 3;
         sreg = hvmemul_get_seg_reg(x86_seg_fs, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->dpl = sreg->sel & 3;
         sreg = hvmemul_get_seg_reg(x86_seg_gs, &hvmemul_ctxt);
-        sreg->attr.fields.dpl = sreg->sel & 3;
+        sreg->dpl = sreg->sel & 3;
         hvmemul_ctxt.seg_reg_dirty |=
             (1ul << x86_seg_ds) | (1ul << x86_seg_es) |
             (1ul << x86_seg_fs) | (1ul << x86_seg_gs);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 69ce3aa..e5dab11 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1069,24 +1069,20 @@ static unsigned int _vmx_get_cpl(struct vcpu *v)
     return cpl;
 }
 
-/* SDM volume 3b section 22.3.1.2: we can only enter virtual 8086 mode
- * if all of CS, SS, DS, ES, FS and GS are 16bit ring-3 data segments.
- * The guest thinks it's got ring-0 segments, so we need to fudge
- * things.  We store the ring-3 version in the VMCS to avoid lots of
- * shuffling on vmenter and vmexit, and translate in these accessors. */
-
-#define rm_cs_attr (((union segment_attributes) {                       \
-        .fields = { .type = 0xb, .s = 1, .dpl = 0, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-#define rm_ds_attr (((union segment_attributes) {                       \
-        .fields = { .type = 0x3, .s = 1, .dpl = 0, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-#define vm86_ds_attr (((union segment_attributes) {                     \
-        .fields = { .type = 0x3, .s = 1, .dpl = 3, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
-#define vm86_tr_attr (((union segment_attributes) {                     \
-        .fields = { .type = 0xb, .s = 0, .dpl = 0, .p = 1, .avl = 0,    \
-                    .l = 0, .db = 0, .g = 0, .pad = 0 } }).bytes)
+/*
+ * SDM Vol 3: VM Entries > Checks on Guest Segment Registers:
+ *
+ * We can only enter virtual 8086 mode if all of CS, SS, DS, ES, FS and GS are
+ * 16bit ring-3 data segments.  On hardware lacking the unrestricted_guest
+ * feature, Xen fakes up real mode using vm86 mode.  The guest thinks it's got
+ * ring-0 segments, so we need to fudge things.  We store the ring-3 version
+ * in the VMCS to avoid lots of shuffling on vmenter and vmexit, and translate
+ * in these accessors.
+ */
+#define rm_cs_attr   0x9b
+#define rm_ds_attr   0x93
+#define vm86_ds_attr 0xf3
+#define vm86_tr_attr 0x8b
 
 static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
                                      struct segment_register *reg)
@@ -1157,7 +1153,7 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
      * Fold VT-x representation into Xen's representation.  The Present bit is
      * unconditionally set to the inverse of unusable.
      */
-    reg->attr.bytes =
+    reg->attr =
         (!(attr & (1u << 16)) << 7) | (attr & 0x7f) | ((attr >> 4) & 0xf00);
 
     /* Adjust for virtual 8086 mode */
@@ -1176,7 +1172,7 @@ static void vmx_get_segment_register(struct vcpu *v, enum x86_segment seg,
              * but for SS we assume it has: the Ubuntu graphical bootloader
              * does this and gets badly confused if we leave the old SS in 
              * place. */
-            reg->attr.bytes = (seg == x86_seg_cs ? rm_cs_attr : rm_ds_attr);
+            reg->attr = (seg == x86_seg_cs ? rm_cs_attr : rm_ds_attr);
             *sreg = *reg;
         }
         else 
@@ -1196,7 +1192,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
     uint64_t base;
 
     sel = reg->sel;
-    attr = reg->attr.bytes;
+    attr = reg->attr;
     limit = reg->limit;
     base = reg->base;
 
@@ -1234,8 +1230,7 @@ static void vmx_set_segment_register(struct vcpu *v, enum x86_segment seg,
              * cause confusion for the guest if it reads the selector,
              * but otherwise we have to emulate if *any* segment hasn't
              * been reloaded. */
-            if ( base < 0x100000 && !(base & 0xf) && limit >= 0xffff
-                 && reg->attr.fields.p )
+            if ( base < 0x100000 && !(base & 0xf) && limit >= 0xffff && reg->p )
             {
                 sel = base >> 4;
                 attr = vm86_ds_attr;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 36f5746..268bae4 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -332,13 +332,13 @@ const struct x86_emulate_ops *shadow_init_emulation(
     creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
 
     /* Work out the emulation mode. */
-    if ( sh_ctxt->ctxt.lma && creg->attr.fields.l )
+    if ( sh_ctxt->ctxt.lma && creg->l )
         sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
     else
     {
         sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
-        sh_ctxt->ctxt.addr_size = creg->attr.fields.db ? 32 : 16;
-        sh_ctxt->ctxt.sp_size   = sreg->attr.fields.db ? 32 : 16;
+        sh_ctxt->ctxt.addr_size = creg->db ? 32 : 16;
+        sh_ctxt->ctxt.sp_size   = sreg->db ? 32 : 16;
     }
 
     /* Attempt to prefetch whole instruction. */
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 85185b6..d50f519 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -479,7 +479,7 @@ static int priv_op_read_segment(enum x86_segment seg,
             return X86EMUL_UNHANDLEABLE;
 
         reg->limit = limit;
-        reg->attr.bytes = ar >> 8;
+        reg->attr = ar >> 8;
     }
     else
     {
@@ -500,19 +500,19 @@ static int priv_op_read_segment(enum x86_segment seg,
 
         reg->limit = ~0U;
 
-        reg->attr.bytes = 0;
-        reg->attr.fields.type = _SEGMENT_WR >> 8;
+        reg->attr = 0;
+        reg->type = _SEGMENT_WR >> 8;
         if ( seg == x86_seg_cs )
         {
-            reg->attr.fields.type |= _SEGMENT_CODE >> 8;
-            reg->attr.fields.l = 1;
+            reg->type |= _SEGMENT_CODE >> 8;
+            reg->l = 1;
         }
         else
-            reg->attr.fields.db = 1;
-        reg->attr.fields.s   = 1;
-        reg->attr.fields.dpl = 3;
-        reg->attr.fields.p   = 1;
-        reg->attr.fields.g   = 1;
+            reg->db = 1;
+        reg->s   = 1;
+        reg->dpl = 3;
+        reg->p   = 1;
+        reg->g   = 1;
     }
 
     /*
@@ -521,9 +521,9 @@ static int priv_op_read_segment(enum x86_segment seg,
      */
     if ( (seg == x86_seg_ss ||
           (seg == x86_seg_cs &&
-           !(reg->attr.fields.type & (_SEGMENT_EC >> 8)))) &&
+           !(reg->type & (_SEGMENT_EC >> 8)))) &&
          guest_kernel_mode(current, ctxt->regs) )
-        reg->attr.fields.dpl = 0;
+        reg->dpl = 0;
 
     return X86EMUL_OKAY;
 }
@@ -578,11 +578,11 @@ static int priv_op_rep_ins(uint16_t port,
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( !sreg.attr.fields.p )
+    if ( !sreg.p )
         return X86EMUL_UNHANDLEABLE;
-    if ( !sreg.attr.fields.s ||
-         (sreg.attr.fields.type & (_SEGMENT_CODE >> 8)) ||
-         !(sreg.attr.fields.type & (_SEGMENT_WR >> 8)) )
+    if ( !sreg.s ||
+         (sreg.type & (_SEGMENT_CODE >> 8)) ||
+         !(sreg.type & (_SEGMENT_WR >> 8)) )
     {
         x86_emul_hw_exception(TRAP_gp_fault, 0, ctxt);
         return X86EMUL_EXCEPTION;
@@ -643,11 +643,11 @@ static int priv_op_rep_outs(enum x86_segment seg, unsigned long offset,
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    if ( !sreg.attr.fields.p )
+    if ( !sreg.p )
         return X86EMUL_UNHANDLEABLE;
-    if ( !sreg.attr.fields.s ||
-         ((sreg.attr.fields.type & (_SEGMENT_CODE >> 8)) &&
-          !(sreg.attr.fields.type & (_SEGMENT_WR >> 8))) )
+    if ( !sreg.s ||
+         ((sreg.type & (_SEGMENT_CODE >> 8)) &&
+          !(sreg.type & (_SEGMENT_WR >> 8))) )
     {
         x86_emul_hw_exception(seg != x86_seg_ss ? TRAP_gp_fault
                                                 : TRAP_stack_error,
diff --git a/xen/arch/x86/vm_event.c b/xen/arch/x86/vm_event.c
index a6ea42c..f91aade 100644
--- a/xen/arch/x86/vm_event.c
+++ b/xen/arch/x86/vm_event.c
@@ -176,7 +176,7 @@ void vm_event_fill_regs(vm_event_request_t *req)
     req->data.regs.x86.gs_base = seg.base;
 
     hvm_get_segment_register(curr, x86_seg_cs, &seg);
-    req->data.regs.x86.cs_arbytes = seg.attr.bytes;
+    req->data.regs.x86.cs_arbytes = seg.attr;
 }
 
 void vm_event_emulate_check(struct vcpu *v, vm_event_response_t *rsp)
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 6819ab3..2201852 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -973,7 +973,7 @@ do {                                                       \
         ASSERT(!ctxt->lma);                                             \
         generate_exception_if((ip) > (cs)->limit, EXC_GP, 0);           \
     } else                                                              \
-        generate_exception_if(ctxt->lma && (cs)->attr.fields.l          \
+        generate_exception_if(ctxt->lma && (cs)->l                      \
                               ? !is_canonical_address(ip)               \
                               : (ip) > (cs)->limit, EXC_GP, 0);         \
 })
@@ -1414,7 +1414,7 @@ get_cpl(
          ops->read_segment(x86_seg_ss, &reg, ctxt) )
         return -1;
 
-    return reg.attr.fields.dpl;
+    return reg.dpl;
 }
 
 static int
@@ -1470,7 +1470,7 @@ static int ioport_access_check(
         return rc == X86EMUL_DONE ? X86EMUL_OKAY : rc;
 
     /* Ensure the TSS has an io-bitmap-offset field. */
-    generate_exception_if(tr.attr.fields.type != 0xb, EXC_GP, 0);
+    generate_exception_if(tr.type != 0xb, EXC_GP, 0);
 
     switch ( rc = read_ulong(x86_seg_tr, 0x66, &iobmp, 2, ctxt, ops) )
     {
@@ -1693,12 +1693,12 @@ protmode_load_seg(
              ops->read_segment(seg, sreg, ctxt) != X86EMUL_OKAY )
             memset(sreg, 0, sizeof(*sreg));
         else
-            sreg->attr.bytes = 0;
+            sreg->attr = 0;
         sreg->sel = sel;
 
         /* Since CPL == SS.DPL, we need to put back DPL. */
         if ( seg == x86_seg_ss )
-            sreg->attr.fields.dpl = sel;
+            sreg->dpl = sel;
 
         return X86EMUL_OKAY;
     }
@@ -1873,10 +1873,10 @@ protmode_load_seg(
                   ((desc.b <<  0) & 0xff000000u) |
                   ((desc.b << 16) & 0x00ff0000u) |
                   ((desc.a >> 16) & 0x0000ffffu));
-    sreg->attr.bytes = (((desc.b >>  8) & 0x00ffu) |
-                        ((desc.b >> 12) & 0x0f00u));
+    sreg->attr = (((desc.b >>  8) & 0x00ffu) |
+                  ((desc.b >> 12) & 0x0f00u));
     sreg->limit = (desc.b & 0x000f0000u) | (desc.a & 0x0000ffffu);
-    if ( sreg->attr.fields.g )
+    if ( sreg->g )
         sreg->limit = (sreg->limit << 12) | 0xfffu;
     sreg->sel = sel;
     return X86EMUL_OKAY;
@@ -4964,9 +4964,9 @@ x86_emulate(
                                             &sreg, ctxt, ops) )
             {
             case X86EMUL_OKAY:
-                if ( sreg.attr.fields.s &&
-                     ((modrm_reg & 1) ? ((sreg.attr.fields.type & 0xa) == 0x2)
-                                      : ((sreg.attr.fields.type & 0xa) != 
0x8)) )
+                if ( sreg.s &&
+                     ((modrm_reg & 1) ? ((sreg.type & 0xa) == 0x2)
+                                      : ((sreg.type & 0xa) != 0x8)) )
                     _regs.eflags |= X86_EFLAGS_ZF;
                 break;
             case X86EMUL_EXCEPTION:
@@ -5189,9 +5189,9 @@ x86_emulate(
                                         ctxt, ops) )
         {
         case X86EMUL_OKAY:
-            if ( !sreg.attr.fields.s )
+            if ( !sreg.s )
             {
-                switch ( sreg.attr.fields.type )
+                switch ( sreg.type )
                 {
                 case 0x01: /* available 16-bit TSS */
                 case 0x03: /* busy 16-bit TSS */
@@ -5223,10 +5223,9 @@ x86_emulate(
             break;
         }
         if ( _regs.eflags & X86_EFLAGS_ZF )
-            dst.val = ((sreg.attr.bytes & 0xff) << 8) |
-                      ((sreg.limit >> (sreg.attr.fields.g ? 12 : 0)) &
-                       0xf0000) |
-                      ((sreg.attr.bytes & 0xf00) << 12);
+            dst.val = ((sreg.attr & 0xff) << 8) |
+                      ((sreg.limit >> (sreg.g ? 12 : 0)) & 0xf0000) |
+                      ((sreg.attr & 0xf00) << 12);
         else
             dst.type = OP_NONE;
         break;
@@ -5238,9 +5237,9 @@ x86_emulate(
                                         ctxt, ops) )
         {
         case X86EMUL_OKAY:
-            if ( !sreg.attr.fields.s )
+            if ( !sreg.s )
             {
-                switch ( sreg.attr.fields.type )
+                switch ( sreg.type )
                 {
                 case 0x01: /* available 16-bit TSS */
                 case 0x03: /* busy 16-bit TSS */
@@ -5291,12 +5290,12 @@ x86_emulate(
 
         cs.base = sreg.base = 0; /* flat segment */
         cs.limit = sreg.limit = ~0u;  /* 4GB limit */
-        sreg.attr.bytes = 0xc93; /* G+DB+P+S+Data */
+        sreg.attr = 0xc93; /* G+DB+P+S+Data */
 
 #ifdef __x86_64__
         if ( ctxt->lma )
         {
-            cs.attr.bytes = 0xa9b; /* L+DB+P+S+Code */
+            cs.attr = 0xa9b; /* L+DB+P+S+Code */
 
             _regs.rcx = _regs.rip;
             _regs.r11 = _regs.eflags & ~X86_EFLAGS_RF;
@@ -5314,7 +5313,7 @@ x86_emulate(
         else
 #endif
         {
-            cs.attr.bytes = 0xc9b; /* G+DB+P+S+Code */
+            cs.attr = 0xc9b; /* G+DB+P+S+Code */
 
             _regs.r(cx) = _regs.eip;
             _regs.eip = msr_val;
@@ -5747,13 +5746,13 @@ x86_emulate(
         cs.sel = msr_val & ~3; /* SELECTOR_RPL_MASK */
         cs.base = 0;   /* flat segment */
         cs.limit = ~0u;  /* 4GB limit */
-        cs.attr.bytes = ctxt->lma ? 0xa9b  /* G+L+P+S+Code */
-                                  : 0xc9b; /* G+DB+P+S+Code */
+        cs.attr = ctxt->lma ? 0xa9b  /* G+L+P+S+Code */
+                            : 0xc9b; /* G+DB+P+S+Code */
 
         sreg.sel = cs.sel + 8;
         sreg.base = 0;   /* flat segment */
         sreg.limit = ~0u;  /* 4GB limit */
-        sreg.attr.bytes = 0xc93; /* G+DB+P+S+Data */
+        sreg.attr = 0xc93; /* G+DB+P+S+Data */
 
         fail_if(ops->write_segment == NULL);
         if ( (rc = ops->write_segment(x86_seg_cs, &cs, ctxt)) != 0 ||
@@ -5793,13 +5792,13 @@ x86_emulate(
                  (op_bytes == 8 ? 32 : 16);
         cs.base = 0;   /* flat segment */
         cs.limit = ~0u;  /* 4GB limit */
-        cs.attr.bytes = op_bytes == 8 ? 0xafb  /* L+DB+P+DPL3+S+Code */
-                                      : 0xcfb; /* G+DB+P+DPL3+S+Code */
+        cs.attr = op_bytes == 8 ? 0xafb  /* L+DB+P+DPL3+S+Code */
+                                : 0xcfb; /* G+DB+P+DPL3+S+Code */
 
         sreg.sel = cs.sel + 8;
         sreg.base = 0;   /* flat segment */
         sreg.limit = ~0u;  /* 4GB limit */
-        sreg.attr.bytes = 0xcf3; /* G+DB+P+DPL3+S+Data */
+        sreg.attr = 0xcf3; /* G+DB+P+DPL3+S+Data */
 
         fail_if(ops->write_segment == NULL);
         if ( (rc = ops->write_segment(x86_seg_cs, &cs, ctxt)) != 0 ||
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index e5ec8a6..4ddf111 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -83,33 +83,26 @@ struct x86_event {
     unsigned long cr2;          /* Only for TRAP_page_fault h/w exception */
 };
 
-/* 
- * Attribute for segment selector. This is a copy of bit 40:47 & 52:55 of the
- * segment descriptor. It happens to match the format of an AMD SVM VMCB.
- */
-typedef union segment_attributes {
-    uint16_t bytes;
-    struct
-    {
-        uint16_t type:4;    /* 0;  Bit 40-43 */
-        uint16_t s:   1;    /* 4;  Bit 44 */
-        uint16_t dpl: 2;    /* 5;  Bit 45-46 */
-        uint16_t p:   1;    /* 7;  Bit 47 */
-        uint16_t avl: 1;    /* 8;  Bit 52 */
-        uint16_t l:   1;    /* 9;  Bit 53 */
-        uint16_t db:  1;    /* 10; Bit 54 */
-        uint16_t g:   1;    /* 11; Bit 55 */
-        uint16_t pad: 4;
-    } fields;
-} segment_attributes_t;
-
 /*
  * Full state of a segment register (visible and hidden portions).
- * Again, this happens to match the format of an AMD SVM VMCB.
+ * Chosen to match the format of an AMD SVM VMCB.
  */
 struct segment_register {
     uint16_t   sel;
-    segment_attributes_t attr;
+    union {
+        uint16_t attr;
+        struct {
+            uint16_t type:4;
+            uint16_t s:   1;
+            uint16_t dpl: 2;
+            uint16_t p:   1;
+            uint16_t avl: 1;
+            uint16_t l:   1;
+            uint16_t db:  1;
+            uint16_t g:   1;
+            uint16_t pad: 4;
+        };
+    };
     uint32_t   limit;
     uint64_t   base;
 };
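
As a worked example of this layout: the attr word is a copy of descriptor
bits 40-47 and 52-55, so bits 0-3 hold type, bit 4 S, bits 5-6 DPL, bit 7 P,
bit 8 AVL, bit 9 L, bit 10 D/B and bit 11 G.  A minimal stand-alone sketch
decoding the constants used throughout the patch against that layout:

    #include <stdint.h>
    #include <stdio.h>

    /* Unpack a VMCB-format attr word per the bitfield layout above. */
    static void dump_attr(uint16_t attr)
    {
        printf("attr %#5x: type=%#x s=%u dpl=%u p=%u avl=%u l=%u db=%u g=%u\n",
               attr, attr & 0xf, (attr >> 4) & 1, (attr >> 5) & 3,
               (attr >> 7) & 1, (attr >> 8) & 1, (attr >> 9) & 1,
               (attr >> 10) & 1, (attr >> 11) & 1);
    }

    int main(void)
    {
        /* 0x9b:  rm_cs_attr   - present exec/read code, accessed.
         * 0x93:  rm_ds_attr   - present read/write data, accessed.
         * 0xf3:  vm86_ds_attr - as 0x93, but DPL 3.
         * 0x8b:  vm86_tr_attr - present busy 32-bit TSS (system segment).
         * 0xc93: flat 32-bit data (G+DB+P+S+Data), as used in domain.c.
         * 0xa9b: 64-bit code (G+L+P+S+Code), as used in domain.c. */
        const uint16_t examples[] = { 0x9b, 0x93, 0xf3, 0x8b, 0xc93, 0xa9b };
        unsigned int i;

        for ( i = 0; i < sizeof(examples) / sizeof(*examples); ++i )
            dump_attr(examples[i]);

        return 0;
    }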
--
generated by git-patchbot for /home/xen/git/xen.git#master
