
[Xen-changelog] [xen-unstable] hvm: Cannot use ring_3() macro on HVM guests



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1198752103 0
# Node ID 1e3e30670ce4449074afdbbea23dd7382e676eb4
# Parent  2324110ef2c69da2b530d8762bb7bc4257084b07
hvm: Cannot use ring_3() macro on HVM guests. It does not work because
the CS field is not saved/restored and also because CS.RPL does not
always equal the DPL (e.g., when executing in real mode).

Instead we must interrogate SS.DPL, or read the CPL directly where the
hardware exposes it (SVM's VMCB has a CPL field).
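
For reference, the check this patch switches to can be sketched as below.
This is an illustration only, not part of the patch: hvm_guest_in_user_mode()
is a hypothetical helper name, while hvm_get_segment_register() and
struct segment_register are the existing interfaces used in the hunks that
follow (most call sites open-code the same test).

    static int hvm_guest_in_user_mode(struct vcpu *v)
    {
        struct segment_register ss;

        /* SS.DPL reflects the current privilege level in every mode,
         * including real mode, unlike CS.RPL. */
        hvm_get_segment_register(v, x86_seg_ss, &ss);
        return (ss.attr.fields.dpl == 3);
    }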

Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/hvm.c           |    9 +++++++--
 xen/arch/x86/hvm/instrlen.c      |    6 +++---
 xen/arch/x86/hvm/platform.c      |    4 +++-
 xen/arch/x86/hvm/svm/svm.c       |    2 +-
 xen/arch/x86/hvm/vmx/vmx.c       |    2 +-
 xen/arch/x86/mm/shadow/common.c  |    6 ++++--
 xen/arch/x86/mm/shadow/multi.c   |   14 ++++++++------
 xen/arch/x86/mm/shadow/private.h |    3 ++-
 8 files changed, 29 insertions(+), 17 deletions(-)

diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Thu Dec 27 10:41:43 2007 +0000
@@ -1272,15 +1272,18 @@ static int __hvm_copy(void *buf, paddr_t
 static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, 
                       int virt, int fetch)
 {
+    struct segment_register sreg;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
     int count, todo;
     uint32_t pfec = PFEC_page_present;
 
+    hvm_get_segment_register(current, x86_seg_ss, &sreg);
+
     if ( dir ) 
         pfec |= PFEC_write_access;
-    if ( ring_3(guest_cpu_user_regs()) )
+    if ( sreg.attr.fields.dpl == 3 )
         pfec |= PFEC_user_mode;
     if ( fetch ) 
         pfec |= PFEC_insn_fetch;
@@ -1514,6 +1517,7 @@ static hvm_hypercall_t *hvm_hypercall32_
 
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
+    struct segment_register sreg;
     int flush, mode = hvm_guest_x86_mode(current);
     uint32_t eax = regs->eax;
 
@@ -1524,7 +1528,8 @@ int hvm_do_hypercall(struct cpu_user_reg
 #endif
     case 4:
     case 2:
-        if ( unlikely(ring_3(regs)) )
+        hvm_get_segment_register(current, x86_seg_ss, &sreg);
+        if ( unlikely(sreg.attr.fields.dpl == 3) )
         {
     default:
             regs->eax = -EPERM;
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/hvm/instrlen.c
--- a/xen/arch/x86/hvm/instrlen.c       Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/instrlen.c       Thu Dec 27 10:41:43 2007 +0000
@@ -192,15 +192,15 @@ static uint8_t twobyte_table[256] = {
        return -1;                                                         \
    if ( inst_copy_from_guest(&_x, pc, 1) != 1 ) {                         \
        unsigned long err;                                                 \
-       struct segment_register cs;                                        \
+       struct segment_register ss;                                        \
        gdprintk(XENLOG_WARNING,                                           \
                 "Cannot read from address %lx (eip %lx, mode %d)\n",      \
                 pc, org_pc, address_bytes);                               \
        err = 0; /* Must be not-present: we don't enforce reserved bits */ \
        if ( hvm_nx_enabled(current) )                                     \
            err |= PFEC_insn_fetch;                                        \
-       hvm_get_segment_register(current, x86_seg_cs, &cs);                \
-       if ( cs.attr.fields.dpl != 0 )                                     \
+       hvm_get_segment_register(current, x86_seg_ss, &ss);                \
+       if ( ss.attr.fields.dpl == 3 )                                     \
            err |= PFEC_user_mode;                                         \
        hvm_inject_exception(TRAP_page_fault, err, pc);                    \
        return -1;                                                         \
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/hvm/platform.c
--- a/xen/arch/x86/hvm/platform.c       Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c       Thu Dec 27 10:41:43 2007 +0000
@@ -1074,6 +1074,7 @@ void handle_mmio(paddr_t gpa)
 
     case INSTR_MOVS:
     {
+        struct segment_register sreg;
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         unsigned long addr, gfn; 
@@ -1089,7 +1090,8 @@ void handle_mmio(paddr_t gpa)
             addr &= 0xFFFF;
         addr += hvm_get_segment_base(v, x86_seg_es);        
         pfec = PFEC_page_present | PFEC_write_access;
-        if ( ring_3(regs) )
+        hvm_get_segment_register(v, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
             pfec |= PFEC_user_mode;
         gfn = paging_gva_to_gfn(v, addr, &pfec);
         paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Thu Dec 27 10:41:43 2007 +0000
@@ -1440,7 +1440,7 @@ static void svm_io_instruction(struct vc
         pfec = PFEC_page_present;
         if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
             pfec |= PFEC_write_access;
-        if ( ring_3(regs) )
+        if ( vmcb->cpl == 3 )
             pfec |= PFEC_user_mode;
         gfn = paging_gva_to_gfn(v, addr, &pfec);
         if ( gfn == INVALID_GFN ) 
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Thu Dec 27 10:41:43 2007 +0000
@@ -1754,7 +1754,7 @@ static void vmx_do_str_pio(unsigned long
     pfec = PFEC_page_present;
     if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
         pfec |= PFEC_write_access;
-    if ( ring_3(regs) )
+    if ( ((__vmread(GUEST_SS_AR_BYTES) >> 5) & 3) == 3 )
         pfec |= PFEC_user_mode;
     gfn = paging_gva_to_gfn(current, addr, &pfec);
     if ( gfn == INVALID_GFN )
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Thu Dec 27 10:41:43 2007 +0000
@@ -101,7 +101,7 @@ int _shadow_mode_refcounts(struct domain
 /* x86 emulator support for the shadow code
  */
 
-static struct segment_register *hvm_get_seg_reg(
+struct segment_register *hvm_get_seg_reg(
     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
 {
     struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
@@ -141,6 +141,7 @@ hvm_read(enum x86_segment seg,
          enum hvm_access_type access_type,
          struct sh_emulate_ctxt *sh_ctxt)
 {
+    struct segment_register *sreg;
     unsigned long addr;
     int rc, errcode;
 
@@ -163,7 +164,8 @@ hvm_read(enum x86_segment seg,
      * was mapped here.  This should never happen: we're here because
      * of a write fault at the end of the instruction we're emulating. */ 
     SHADOW_PRINTK("read failed to va %#lx\n", addr);
-    errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
+    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+    errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
     if ( access_type == hvm_access_insn_fetch )
         errcode |= PFEC_insn_fetch;
     hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Thu Dec 27 10:41:43 2007 +0000
@@ -4018,16 +4018,18 @@ static mfn_t emulate_gva_to_mfn(struct v
 
 /* Check that the user is allowed to perform this write. 
  * Returns a mapped pointer to write to, or NULL for error. */
-static void * emulate_map_dest(struct vcpu *v,
-                               unsigned long vaddr,
-                               u32 bytes,
-                               struct sh_emulate_ctxt *sh_ctxt)
-{
+static void *emulate_map_dest(struct vcpu *v,
+                              unsigned long vaddr,
+                              u32 bytes,
+                              struct sh_emulate_ctxt *sh_ctxt)
+{
+    struct segment_register *sreg;
     unsigned long offset;
     void *map = NULL;
 
     /* We don't emulate user-mode writes to page tables */
-    if ( ring_3(sh_ctxt->ctxt.regs) ) 
+    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+    if ( sreg->attr.fields.dpl == 3 )
         return NULL;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
diff -r 2324110ef2c6 -r 1e3e30670ce4 xen/arch/x86/mm/shadow/private.h
--- a/xen/arch/x86/mm/shadow/private.h  Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h  Thu Dec 27 10:41:43 2007 +0000
@@ -680,7 +680,8 @@ struct x86_emulate_ops *shadow_init_emul
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
 void shadow_continue_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
-
+struct segment_register *hvm_get_seg_reg(
+    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
 /**************************************************************************/
