[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH] x86_emulate adjustments



- don't provide cmpxchg8b emulation where not needed (i.e. for page-table ops
on a 64-bit hypervisor)
- properly deal with stack operands (push/pop)
- synchronize prefix handling with hvm's instrlen determination and with the
  soon-to-be-committed privileged-op decoder (changes coming with the 32on64
  patches)
- support cmpxchg16b if the CPU supports it
- a couple of minor adjustments

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxxxx>

Index: 2006-12-18/xen/arch/x86/mm.c
===================================================================
--- 2006-12-18.orig/xen/arch/x86/mm.c   2006-12-15 16:33:59.000000000 +0100
+++ 2006-12-18/xen/arch/x86/mm.c        2006-12-18 09:36:41.000000000 +0100
@@ -3240,6 +3240,7 @@ static int ptwr_emulated_cmpxchg(
         container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
 }
 
+#ifdef __i386__
 static int ptwr_emulated_cmpxchg8b(
     enum x86_segment seg,
     unsigned long offset,
@@ -3255,13 +3256,16 @@ static int ptwr_emulated_cmpxchg8b(
         offset, ((u64)old_hi << 32) | old, ((u64)new_hi << 32) | new, 8, 1,
         container_of(ctxt, struct ptwr_emulate_ctxt, ctxt));
 }
+#endif
 
 static struct x86_emulate_ops ptwr_emulate_ops = {
     .read       = ptwr_emulated_read,
     .insn_fetch = ptwr_emulated_read,
     .write      = ptwr_emulated_write,
     .cmpxchg    = ptwr_emulated_cmpxchg,
-    .cmpxchg8b  = ptwr_emulated_cmpxchg8b
+#ifdef __i386__
+    .cmpxchg2   = ptwr_emulated_cmpxchg8b
+#endif
 };
 
 /* Write page fault handler: check if guest is trying to modify a PTE. */
Index: 2006-12-18/xen/arch/x86/mm/shadow/common.c
===================================================================
--- 2006-12-18.orig/xen/arch/x86/mm/shadow/common.c     2006-12-15 16:33:59.000000000 +0100
+++ 2006-12-18/xen/arch/x86/mm/shadow/common.c  2006-12-18 09:36:41.000000000 +0100
@@ -291,6 +291,7 @@ hvm_emulate_cmpxchg(enum x86_segment seg
         v, addr, old, new, bytes, sh_ctxt);
 }
 
+#ifdef __i386__
 static int 
 hvm_emulate_cmpxchg8b(enum x86_segment seg,
                       unsigned long offset,
@@ -314,13 +315,31 @@ hvm_emulate_cmpxchg8b(enum x86_segment s
     return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
         v, addr, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
 }
+#endif
+
+static unsigned int
+hvm_stack_word_size(struct x86_emulate_ctxt *ctxt)
+{
+#ifdef __x86_64__
+    if ( ctxt->mode == X86EMUL_MODE_PROT64 )
+        return 8;
+#endif
+
+    return hvm_get_seg_reg(x86_seg_ss,
+                           container_of(ctxt,
+                                        struct sh_emulate_ctxt,
+                                        ctxt))->attr.fields.db ? 4 : 2;
+}
 
 static struct x86_emulate_ops hvm_shadow_emulator_ops = {
     .read       = hvm_emulate_read,
     .insn_fetch = hvm_emulate_insn_fetch,
     .write      = hvm_emulate_write,
     .cmpxchg    = hvm_emulate_cmpxchg,
-    .cmpxchg8b  = hvm_emulate_cmpxchg8b,
+#ifdef __i386__
+    .cmpxchg2   = hvm_emulate_cmpxchg8b,
+#endif
+    .stksz      = hvm_stack_word_size
 };
 
 static int
@@ -371,6 +390,7 @@ pv_emulate_cmpxchg(enum x86_segment seg,
         v, offset, old, new, bytes, sh_ctxt);
 }
 
+#ifdef __i386__
 static int 
 pv_emulate_cmpxchg8b(enum x86_segment seg,
                      unsigned long offset,
@@ -386,13 +406,16 @@ pv_emulate_cmpxchg8b(enum x86_segment se
     return v->arch.shadow.mode->x86_emulate_cmpxchg8b(
         v, offset, old_lo, old_hi, new_lo, new_hi, sh_ctxt);
 }
+#endif
 
 static struct x86_emulate_ops pv_shadow_emulator_ops = {
     .read       = pv_emulate_read,
     .insn_fetch = pv_emulate_read,
     .write      = pv_emulate_write,
     .cmpxchg    = pv_emulate_cmpxchg,
-    .cmpxchg8b  = pv_emulate_cmpxchg8b,
+#ifdef __i386__
+    .cmpxchg2   = pv_emulate_cmpxchg8b,
+#endif
 };
 
 struct x86_emulate_ops *shadow_init_emulation(
Index: 2006-12-18/xen/arch/x86/mm/shadow/multi.c
===================================================================
--- 2006-12-18.orig/xen/arch/x86/mm/shadow/multi.c      2006-12-15 16:33:59.000000000 +0100
+++ 2006-12-18/xen/arch/x86/mm/shadow/multi.c   2006-12-18 09:36:41.000000000 +0100
@@ -3960,7 +3960,8 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     return rv;
 }
 
-int
+#ifdef __i386__
+static int
 sh_x86_emulate_cmpxchg8b(struct vcpu *v, unsigned long vaddr, 
                           unsigned long old_lo, unsigned long old_hi,
                           unsigned long new_lo, unsigned long new_hi,
@@ -3999,6 +4000,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     shadow_audit_tables(v);
     return rv;
 }
+#endif
 
 
 /**************************************************************************/
@@ -4286,7 +4288,9 @@ struct shadow_paging_mode sh_paging_mode
     .detach_old_tables      = sh_detach_old_tables,
     .x86_emulate_write      = sh_x86_emulate_write,
     .x86_emulate_cmpxchg    = sh_x86_emulate_cmpxchg,
+#ifdef __i386__
     .x86_emulate_cmpxchg8b  = sh_x86_emulate_cmpxchg8b,
+#endif
     .make_monitor_table     = sh_make_monitor_table,
     .destroy_monitor_table  = sh_destroy_monitor_table,
     .guest_map_l1e          = sh_guest_map_l1e,
Index: 2006-12-18/xen/arch/x86/x86_emulate.c
===================================================================
--- 2006-12-18.orig/xen/arch/x86/x86_emulate.c  2006-12-13 11:15:54.000000000 +0100
+++ 2006-12-18/xen/arch/x86/x86_emulate.c       2006-12-18 09:36:41.000000000 +0100
@@ -15,6 +15,7 @@
 #include <xen/types.h>
 #include <xen/lib.h>
 #include <asm/regs.h>
+#include <asm/processor.h>
 #undef cmpxchg
 #endif
 #include <asm-x86/x86_emulate.h>
@@ -375,7 +376,10 @@ do{ __asm__ __volatile__ (              
 /* Fetch next part of the instruction being emulated. */
 #define insn_fetch_bytes(_size)                                         \
 ({ unsigned long _x;                                                    \
-   rc = ops->insn_fetch(x86_seg_cs, _regs.eip, &_x, (_size), ctxt);     \
+   if ( _regs.eip - ctxt->regs->eip < 16 - (_size) )                    \
+       rc = ops->insn_fetch(x86_seg_cs, _regs.eip, &_x, (_size), ctxt); \
+   else                                                                 \
+       goto cannot_emulate;                                             \
    if ( rc != 0 )                                                       \
        goto done;                                                       \
    _regs.eip += (_size);                                                \
@@ -389,6 +393,14 @@ do{ __asm__ __volatile__ (              
      (__ea & ((1UL << (ad_bytes << 3)) - 1)));          \
 })
 
+#define truncate_sp()                                        \
+({                                                           \
+    if ( !stksz )                                            \
+        stksz = !ops->stksz ? ad_default : ops->stksz(ctxt); \
+    ((stksz == sizeof(unsigned long)) ? _regs.esp :          \
+     (_regs.esp & ((1UL << (stksz << 3)) - 1)));             \
+})
+
 /* Update address held in a register, based on addressing mode. */
 #define register_address_increment(reg, inc)                            \
 do {                                                                    \
@@ -400,6 +412,18 @@ do {                                    
                 (((reg) + _inc) & ((1UL << (ad_bytes << 3)) - 1));      \
 } while (0)
 
+#define sp_increment(inc)                                               \
+do {                                                                    \
+    int _inc = (inc); /* signed type ensures sign extension to long */  \
+    if ( !stksz )                                                       \
+        stksz = !ops->stksz ? ad_default : ops->stksz(ctxt);            \
+    if ( stksz == sizeof(unsigned long) )                               \
+        _regs.esp += _inc;                                              \
+    else                                                                \
+        _regs.esp = (_regs.esp & ~((1UL << (stksz << 3)) - 1)) |        \
+                ((_regs.esp + _inc) & ((1UL << (stksz << 3)) - 1));     \
+} while (0)
+
 void *
 decode_register(
     uint8_t modrm_reg, struct cpu_user_regs *regs, int highbyte_regs)
@@ -450,7 +474,8 @@ x86_emulate_memop(
 
     uint8_t b, d, sib, sib_index, sib_base, twobyte = 0, rex_prefix = 0;
     uint8_t modrm, modrm_mod = 0, modrm_reg = 0, modrm_rm = 0;
-    unsigned int op_bytes, ad_bytes, lock_prefix = 0, rep_prefix = 0, i;
+    unsigned int op_bytes, ad_bytes, lock_prefix = 0, rep_prefix = 0;
+    unsigned int op_default, ad_default, stksz = 0;
     int rc = 0;
     struct operand src, dst;
     int mode = ctxt->mode;
@@ -467,15 +492,15 @@ x86_emulate_memop(
     {
     case X86EMUL_MODE_REAL:
     case X86EMUL_MODE_PROT16:
-        op_bytes = ad_bytes = 2;
+        op_default = op_bytes = ad_default = ad_bytes = 2;
         break;
     case X86EMUL_MODE_PROT32:
-        op_bytes = ad_bytes = 4;
+        op_default = op_bytes = ad_default = ad_bytes = 4;
         break;
 #ifdef __x86_64__
     case X86EMUL_MODE_PROT64:
-        op_bytes = 4;
-        ad_bytes = 8;
+        op_default = op_bytes = 4;
+        ad_default = ad_bytes = 8;
         break;
 #endif
     default:
@@ -483,18 +508,18 @@ x86_emulate_memop(
     }
 
     /* Legacy prefixes. */
-    for ( i = 0; i < 8; i++ )
+    for ( ; ; )
     {
         switch ( b = insn_fetch_type(uint8_t) )
         {
         case 0x66: /* operand-size override */
-            op_bytes ^= 6;      /* switch between 2/4 bytes */
+            op_bytes = op_default ^ 6;      /* switch between 2/4 bytes */
             break;
         case 0x67: /* address-size override */
             if ( mode == X86EMUL_MODE_PROT64 )
-                ad_bytes ^= 12; /* switch between 4/8 bytes */
+                ad_bytes = ad_default ^ 12; /* switch between 4/8 bytes */
             else
-                ad_bytes ^= 6;  /* switch between 2/4 bytes */
+                ad_bytes = ad_default ^ 6;  /* switch between 2/4 bytes */
             break;
         case 0x2e: /* CS override */
             ea.mem.seg = x86_seg_cs;
@@ -517,25 +542,29 @@ x86_emulate_memop(
         case 0xf0: /* LOCK */
             lock_prefix = 1;
             break;
+        case 0xf2: /* REPNE/REPNZ */
         case 0xf3: /* REP/REPE/REPZ */
             rep_prefix = 1;
             break;
-        case 0xf2: /* REPNE/REPNZ */
-            break;
+#ifdef __x86_64__
+        case 0x40 ... 0x4f:
+            if ( mode == X86EMUL_MODE_PROT64 )
+            {
+                rex_prefix = b;
+                continue;
+            }
+            /* FALLTHRU */
+#endif
         default:
             goto done_prefixes;
         }
+        rex_prefix = 0;
     }
  done_prefixes:
 
     /* REX prefix. */
-    if ( (mode == X86EMUL_MODE_PROT64) && ((b & 0xf0) == 0x40) )
-    {
-        rex_prefix = b;
-        if ( b & 8 ) /* REX.W */
-            op_bytes = 8;
-        b = insn_fetch_type(uint8_t);
-    }
+    if ( rex_prefix & 8 ) /* REX.W */
+        op_bytes = 8;
 
     /* Opcode byte(s). */
     d = opcode_table[b];
@@ -611,7 +640,16 @@ x86_emulate_memop(
                 if ( (modrm_mod == 0) && ((sib_base & 7) == 5) )
                     ea.mem.off += insn_fetch_type(int32_t);
                 else
+                {
                     ea.mem.off += *(long*)decode_register(sib_base, &_regs, 0);
+                    if ( sib_base == 4 && !twobyte && b == 0x8f )
+                    {
+                        /* POP to an address with esp as base register. */
+                        if ( !stksz )
+                            stksz = !ops->stksz ? ad_default : ops->stksz(ctxt);
+                        ea.mem.off += stksz;
+                    }
+                }
             }
             else
             {
@@ -865,12 +903,12 @@ x86_emulate_memop(
         break;
     case 0x8f: /* pop (sole member of Grp1a) */
         /* 64-bit mode: POP always pops a 64-bit operand. */
-        if ( mode == X86EMUL_MODE_PROT64 )
+        if ( mode == X86EMUL_MODE_PROT64 && dst.bytes == 4 )
             dst.bytes = 8;
-        if ( (rc = ops->read(x86_seg_ss, truncate_ea(_regs.esp),
+        if ( (rc = ops->read(x86_seg_ss, truncate_sp(),
                              &dst.val, dst.bytes, ctxt)) != 0 )
             goto done;
-        register_address_increment(_regs.esp, dst.bytes);
+        sp_increment(dst.bytes);
         break;
     case 0xc0 ... 0xc1: grp2: /* Grp2 */
         switch ( modrm_reg & 7 )
@@ -941,15 +979,15 @@ x86_emulate_memop(
             break;
         case 6: /* push */
             /* 64-bit mode: PUSH always pushes a 64-bit operand. */
-            if ( mode == X86EMUL_MODE_PROT64 )
+            if ( mode == X86EMUL_MODE_PROT64 && dst.bytes == 4 )
             {
                 dst.bytes = 8;
                 if ( (rc = ops->read(dst.mem.seg, dst.mem.off,
                                      &dst.val, 8, ctxt)) != 0 )
                     goto done;
             }
-            register_address_increment(_regs.esp, -dst.bytes);
-            if ( (rc = ops->write(x86_seg_ss, truncate_ea(_regs.esp),
+            sp_increment(-dst.bytes);
+            if ( (rc = ops->write(x86_seg_ss, truncate_sp(),
                                   dst.val, dst.bytes, ctxt)) != 0 )
                 goto done;
             dst.val = dst.orig_val; /* skanky: disable writeback */
@@ -999,7 +1037,7 @@ x86_emulate_memop(
  special_insn:
     if ( twobyte )
         goto twobyte_special_insn;
-    if ( rep_prefix )
+    if ( rep_prefix && (b & ~3) != 0xa0 )
     {
         if ( _regs.ecx == 0 )
         {
@@ -1148,6 +1186,7 @@ x86_emulate_memop(
         case 1: goto bts;
         case 2: goto btr;
         case 3: goto btc;
+        default: goto cannot_emulate;
         }
         break;
     case 0xbe: /* movsx rm8,r{16,32,64} */
@@ -1180,57 +1219,66 @@ x86_emulate_memop(
     case 0x0d: /* GrpP (prefetch) */
     case 0x18: /* Grp16 (prefetch/nop) */
         break;
-    case 0xc7: /* Grp9 (cmpxchg8b) */
-#if defined(__i386__)
-    {
-        unsigned long old_lo, old_hi;
-        if ( (rc = ops->read(ea.mem.seg, ea.mem.off+0, &old_lo, 4, ctxt)) ||
-             (rc = ops->read(ea.mem.seg, ea.mem.off+4, &old_hi, 4, ctxt)) )
-            goto done;
-        if ( (old_lo != _regs.eax) || (old_hi != _regs.edx) )
-        {
-            _regs.eax = old_lo;
-            _regs.edx = old_hi;
-            _regs.eflags &= ~EFLG_ZF;
-        }
-        else if ( ops->cmpxchg8b == NULL )
-        {
-            rc = X86EMUL_UNHANDLEABLE;
-            goto done;
-        }
-        else
+    case 0xc7: /* Grp9 (cmpxchg{8,16}b) */
+#ifdef __x86_64__
+        if ( op_bytes != 8 )
         {
-            if ( (rc = ops->cmpxchg8b(ea.mem.seg, ea.mem.off, old_lo, old_hi,
-                                      _regs.ebx, _regs.ecx, ctxt)) != 0 )
+            unsigned long old, new;
+            if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) )
                 goto done;
-            _regs.eflags |= EFLG_ZF;
-        }
-        break;
-    }
-#elif defined(__x86_64__)
-    {
-        unsigned long old, new;
-        if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old, 8, ctxt)) != 0 )
-            goto done;
-        if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||
-             ((uint32_t)(old>>32) != (uint32_t)_regs.edx) )
-        {
-            _regs.eax = (uint32_t)(old>>0);
-            _regs.edx = (uint32_t)(old>>32);
-            _regs.eflags &= ~EFLG_ZF;
+            if ( ((uint32_t)(old>>0) != (uint32_t)_regs.eax) ||
+                 ((uint32_t)(old>>32) != (uint32_t)_regs.edx) )
+            {
+                _regs.eax = (uint32_t)(old>>0);
+                _regs.edx = (uint32_t)(old>>32);
+                _regs.eflags &= ~EFLG_ZF;
+            }
+            else
+            {
+                new = (_regs.ecx<<32)|(uint32_t)_regs.ebx;
+                if ( (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old,
+                                        new, 8, ctxt)) != 0 )
+                    goto done;
+                _regs.eflags |= EFLG_ZF;
+            }
         }
         else
+#endif
         {
-            new = (_regs.ecx<<32)|(uint32_t)_regs.ebx;
-            if ( (rc = ops->cmpxchg(ea.mem.seg, ea.mem.off, old,
-                                    new, 8, ctxt)) != 0 )
+            unsigned long old_lo, old_hi;
+            if ( (rc = ops->read(ea.mem.seg, ea.mem.off, &old_lo,
+                                 sizeof(old_lo), ctxt)) ||
+                 (rc = ops->read(ea.mem.seg, ea.mem.off+sizeof(old_lo),
+                                 &old_hi, sizeof(old_hi), ctxt)) )
+                goto done;
+            if ( (old_lo != _regs.eax) || (old_hi != _regs.edx) )
+            {
+                _regs.eax = old_lo;
+                _regs.edx = old_hi;
+                _regs.eflags &= ~EFLG_ZF;
+            }
+            else if ( ops->cmpxchg2 == NULL )
+            {
+                rc = X86EMUL_UNHANDLEABLE;
                 goto done;
-            _regs.eflags |= EFLG_ZF;
+            }
+#ifdef __x86_64__
+            else if ( !cpu_has_cmpxchg16b )
+            {
+                rc = X86EMUL_UNHANDLEABLE;
+                goto done;
+            }
+#endif
+            else
+            {
+                if ( (rc = ops->cmpxchg2(ea.mem.seg, ea.mem.off, old_lo, old_hi,
+                                         _regs.ebx, _regs.ecx, ctxt)) != 0 )
+                    goto done;
+                _regs.eflags |= EFLG_ZF;
+            }
         }
         break;
     }
-#endif
-    }
     goto writeback;
 
  cannot_emulate:
Index: 2006-12-18/xen/include/asm-x86/cpufeature.h
===================================================================
--- 2006-12-18.orig/xen/include/asm-x86/cpufeature.h    2006-12-13 11:15:56.000000000 +0100
+++ 2006-12-18/xen/include/asm-x86/cpufeature.h 2006-12-18 09:36:41.000000000 +0100
@@ -118,6 +118,7 @@
 #define cpu_has_cyrix_arr      boot_cpu_has(X86_FEATURE_CYRIX_ARR)
 #define cpu_has_centaur_mcr    boot_cpu_has(X86_FEATURE_CENTAUR_MCR)
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_cmpxchg16b     0
 #else /* __x86_64__ */
 #define cpu_has_vme            0
 #define cpu_has_de             1
@@ -140,6 +141,7 @@
 #define cpu_has_cyrix_arr      0
 #define cpu_has_centaur_mcr    0
 #define cpu_has_clflush                boot_cpu_has(X86_FEATURE_CLFLSH)
+#define cpu_has_cmpxchg16b     boot_cpu_has(X86_FEATURE_CX16)
 #endif
 
 #endif /* __ASM_I386_CPUFEATURE_H */
Index: 2006-12-18/xen/include/asm-x86/shadow.h
===================================================================
--- 2006-12-18.orig/xen/include/asm-x86/shadow.h        2006-12-15 16:33:59.000000000 +0100
+++ 2006-12-18/xen/include/asm-x86/shadow.h     2006-12-18 09:36:41.000000000 +0100
@@ -157,12 +157,14 @@ struct shadow_paging_mode {
                                             unsigned long new,
                                             unsigned int bytes,
                                             struct sh_emulate_ctxt *sh_ctxt);
+#ifdef __i386__
     int           (*x86_emulate_cmpxchg8b )(struct vcpu *v, unsigned long va,
                                             unsigned long old_lo, 
                                             unsigned long old_hi, 
                                             unsigned long new_lo,
                                             unsigned long new_hi,
                                             struct sh_emulate_ctxt *sh_ctxt);
+#endif
     mfn_t         (*make_monitor_table    )(struct vcpu *v);
     void          (*destroy_monitor_table )(struct vcpu *v, mfn_t mmfn);
     void *        (*guest_map_l1e         )(struct vcpu *v, unsigned long va,
Index: 2006-12-18/xen/include/asm-x86/x86_emulate.h
===================================================================
--- 2006-12-18.orig/xen/include/asm-x86/x86_emulate.h   2006-12-13 11:15:56.000000000 +0100
+++ 2006-12-18/xen/include/asm-x86/x86_emulate.h        2006-12-18 09:36:41.000000000 +0100
@@ -39,8 +39,9 @@ enum x86_segment {
  *     some out-of-band mechanism, unknown to the emulator. The memop signals
  *     failure by returning X86EMUL_PROPAGATE_FAULT to the emulator, which will
  *     then immediately bail.
- *  2. Valid access sizes are 1, 2, 4 and 8 bytes. On x86/32 systems only
- *     cmpxchg8b_emulated need support 8-byte accesses.
+ *  2. Valid access sizes are 1, 2, 4, 8, and 16 bytes. On x86/32 systems only
+ *     cmpxchg2_emulated need support 8-byte accesses. On x86/64 systems only
+ *     cmpxchg2_emulated need support 16-byte accesses.
  *  3. The emulator cannot handle 64-bit mode emulation on an x86/32 system.
  */
 /* Access completed successfully: continue emulation as normal. */
@@ -110,16 +111,17 @@ struct x86_emulate_ops
         struct x86_emulate_ctxt *ctxt);
 
     /*
-     * cmpxchg8b: Emulate an atomic (LOCKed) CMPXCHG8B operation.
+     * cmpxchg2: Emulate an atomic (LOCKed) CMPXCHG{8,16}B operation.
      *  @old:   [IN ] Value expected to be current at @addr.
      *  @new:   [IN ] Value to write to @addr.
      * NOTES:
-     *  1. This function is only ever called when emulating a real CMPXCHG8B.
-     *  2. This function is *never* called on x86/64 systems.
-     *  2. Not defining this function (i.e., specifying NULL) is equivalent
+     *  1. This function is only ever called when emulating a real CMPXCHG{8,16}B.
+     *  2. This function is *never* called on x86/64 systems for emulating
+     *     CMPXCHG8B.
+     *  3. Not defining this function (i.e., specifying NULL) is equivalent
      *     to defining a function that always returns X86EMUL_UNHANDLEABLE.
      */
-    int (*cmpxchg8b)(
+    int (*cmpxchg2)(
         enum x86_segment seg,
         unsigned long offset,
         unsigned long old_lo,
@@ -127,6 +129,16 @@ struct x86_emulate_ops
         unsigned long new_lo,
         unsigned long new_hi,
         struct x86_emulate_ctxt *ctxt);
+
+    /*
+     * stksz: Determine the item size of the guest stack.
+     * NOTE:
+     *  Not defining this function (i.e., specifying NULL) is equivalent
+     *  to defining a function that returns the default address size of
+     *  the execution mode (X86EMUL_MODE_xxx).
+     */
+    unsigned int (*stksz)(
+        struct x86_emulate_ctxt *ctxt);
 };
 
 struct cpu_user_regs;


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.