
[Xen-changelog] [xen-unstable] hvm: Code cleanups.



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1206713553 0
# Node ID b5fea3aeb04b14b8ba82d3db18449bd593051113
# Parent  d686808b316903120473833738957c049765f49c
hvm: Code cleanups.
Signed-off-by: Keir Fraser <keir.fraser@xxxxxxxxxx>
---
 xen/arch/x86/hvm/emulate.c        |    6 -
 xen/arch/x86/hvm/hvm.c            |   44 +++++++-----
 xen/arch/x86/hvm/vmx/vmx.c        |   44 +++---------
 xen/include/asm-x86/hvm/io.h      |    3 
 xen/include/asm-x86/hvm/support.h |   19 -----
 xen/include/asm-x86/hvm/vmx/vmx.h |  135 ++++++++++++++++++--------------------
 6 files changed, 107 insertions(+), 144 deletions(-)
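
In short: the hard-coded port 0xe9 case is dropped from hvmemul_write_io() and hvm_print_line() becomes an ordinary port I/O handler registered in hvm_domain_initialise() (MAX_IO_HANDLER grows from 12 to 16); the control-register-access exit-qualification constants gain a VMX_ prefix in vmx.h while the duplicated TYPE_* definitions, the unused debug-register constants and the HVM_DEBUG indirection disappear from support.h; LMSW emulation now takes the full 16-bit source operand from exit-qualification bits 31:16 rather than only the low nibble; and the VMX inline-assembly wrappers switch to the shorter asm volatile spelling.

As a guest-side illustration (not part of this patch; only the port number 0xe9 and the one-byte access size are taken from the code below), this is the kind of traffic that now reaches hvm_print_line() through the registered handler:

    /* Each 1-byte OUT to port 0xe9 is routed by the port I/O dispatch to
     * hvm_print_line(), which buffers characters until a newline and then
     * printk()s the whole line. */
    static inline void debug_putc(char c)
    {
        asm volatile ( "outb %b0, %w1" : : "a" (c), "Nd" (0xe9) );
    }

    static void debug_puts(const char *s)
    {
        while ( *s != '\0' )
            debug_putc(*s++);
    }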

diff -r d686808b3169 -r b5fea3aeb04b xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Fri Mar 28 11:17:11 2008 +0000
+++ b/xen/arch/x86/hvm/emulate.c        Fri Mar 28 14:12:33 2008 +0000
@@ -522,12 +522,6 @@ static int hvmemul_write_io(
     unsigned long val,
     struct x86_emulate_ctxt *ctxt)
 {
-    if ( port == 0xe9 )
-    {
-        hvm_print_line(current, val);
-        return X86EMUL_OKAY;
-    }
-
     return hvmemul_do_pio(port, 1, bytes, val, IOREQ_WRITE, 0, 0, NULL);
 }
 
diff -r d686808b3169 -r b5fea3aeb04b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Mar 28 11:17:11 2008 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Fri Mar 28 14:12:33 2008 +0000
@@ -255,6 +255,30 @@ static int hvm_set_ioreq_page(
     return 0;
 }
 
+static int hvm_print_line(
+    int dir, uint32_t port, uint32_t bytes, uint32_t *val)
+{
+    struct vcpu *curr = current;
+    struct hvm_domain *hd = &curr->domain->arch.hvm_domain;
+    char c = *val;
+
+    BUG_ON(bytes != 1);
+
+    spin_lock(&hd->pbuf_lock);
+    hd->pbuf[hd->pbuf_idx++] = c;
+    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
+    {
+        if ( c != '\n' )
+            hd->pbuf[hd->pbuf_idx++] = '\n';
+        hd->pbuf[hd->pbuf_idx] = '\0';
+        printk(XENLOG_G_DEBUG "HVM%u: %s", curr->domain->domain_id, hd->pbuf);
+        hd->pbuf_idx = 0;
+    }
+    spin_unlock(&hd->pbuf_lock);
+
+    return 1;
+}
+
 int hvm_domain_initialise(struct domain *d)
 {
     int rc;
@@ -288,6 +312,8 @@ int hvm_domain_initialise(struct domain 
 
     hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
     hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
+
+    register_portio_handler(d, 0xe9, 1, hvm_print_line);
 
     rc = hvm_funcs.domain_initialise(d);
     if ( rc != 0 )
@@ -1577,24 +1603,6 @@ unsigned long copy_from_user_hvm(void *t
 
     rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
     return rc ? len : 0; /* fake a copy_from_user() return code */
-}
-
-/* HVM specific printbuf. Mostly used for hvmloader chit-chat. */
-void hvm_print_line(struct vcpu *v, const char c)
-{
-    struct hvm_domain *hd = &v->domain->arch.hvm_domain;
-
-    spin_lock(&hd->pbuf_lock);
-    hd->pbuf[hd->pbuf_idx++] = c;
-    if ( (hd->pbuf_idx == (sizeof(hd->pbuf) - 2)) || (c == '\n') )
-    {
-        if ( c != '\n' )
-            hd->pbuf[hd->pbuf_idx++] = '\n';
-        hd->pbuf[hd->pbuf_idx] = '\0';
-        printk(XENLOG_G_DEBUG "HVM%u: %s", v->domain->domain_id, hd->pbuf);
-        hd->pbuf_idx = 0;
-    }
-    spin_unlock(&hd->pbuf_lock);
 }
 
 #define bitmaskof(idx)  (1U << ((idx) & 31))
diff -r d686808b3169 -r b5fea3aeb04b xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Fri Mar 28 11:17:11 2008 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Mar 28 14:12:33 2008 +0000
@@ -1240,23 +1240,6 @@ static void vmx_do_cpuid(struct cpu_user
     regs->edx = edx;
 }
 
-#define CASE_GET_REG_P(REG, reg)    \
-    case REG_ ## REG: reg_p = (unsigned long *)&(regs->reg); break
-
-#ifdef __i386__
-#define CASE_EXTEND_GET_REG_P
-#else
-#define CASE_EXTEND_GET_REG_P       \
-    CASE_GET_REG_P(R8, r8);         \
-    CASE_GET_REG_P(R9, r9);         \
-    CASE_GET_REG_P(R10, r10);       \
-    CASE_GET_REG_P(R11, r11);       \
-    CASE_GET_REG_P(R12, r12);       \
-    CASE_GET_REG_P(R13, r13);       \
-    CASE_GET_REG_P(R14, r14);       \
-    CASE_GET_REG_P(R15, r15)
-#endif
-
 static void vmx_dr_access(unsigned long exit_qualification,
                           struct cpu_user_regs *regs)
 {
@@ -1280,9 +1263,9 @@ static void vmx_invlpg_intercept(unsigne
 }
 
 #define CASE_SET_REG(REG, reg)      \
-    case REG_ ## REG: regs->reg = value; break
+    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: regs->reg = value; break
 #define CASE_GET_REG(REG, reg)      \
-    case REG_ ## REG: value = regs->reg; break
+    case VMX_CONTROL_REG_ACCESS_GPR_ ## REG: value = regs->reg; break
 
 #define CASE_EXTEND_SET_REG         \
     CASE_EXTEND_REG(S)
@@ -1408,26 +1391,25 @@ static int vmx_cr_access(unsigned long e
     unsigned long value;
     struct vcpu *v = current;
 
-    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
-    {
-    case TYPE_MOV_TO_CR:
-        gp = exit_qualification & CONTROL_REG_ACCESS_REG;
-        cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
+    switch ( exit_qualification & VMX_CONTROL_REG_ACCESS_TYPE )
+    {
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR:
+        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
+        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
         return mov_to_cr(gp, cr, regs);
-    case TYPE_MOV_FROM_CR:
-        gp = exit_qualification & CONTROL_REG_ACCESS_REG;
-        cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
+    case VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR:
+        gp = exit_qualification & VMX_CONTROL_REG_ACCESS_GPR;
+        cr = exit_qualification & VMX_CONTROL_REG_ACCESS_NUM;
         mov_from_cr(cr, gp, regs);
         break;
-    case TYPE_CLTS:
+    case VMX_CONTROL_REG_ACCESS_TYPE_CLTS:
         v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS;
         vmx_update_guest_cr(v, 0);
         HVMTRACE_0D(CLTS, current);
         break;
-    case TYPE_LMSW:
+    case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
         value = v->arch.hvm_vcpu.guest_cr[0];
-        value = (value & ~0xF) |
-            (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
+        value = (value & ~0xFFFF) | ((exit_qualification >> 16) & 0xFFFF);
         HVMTRACE_1D(LMSW, current, value);
         return !hvm_set_cr0(value);
     default:
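
For reference, a sketch of how the renamed masks decode a CR-access exit qualification; apart from the VMX_CONTROL_REG_ACCESS_* names introduced by this patch, the EXIT_QUALIFICATION field name and the surrounding fragment are illustrative assumptions:

    /* Sketch: decoding a CR-access exit qualification with the new names.
     * The GPR field is left in place so it compares directly against the
     * VMX_CONTROL_REG_ACCESS_GPR_* values, as the CASE_GET_REG/CASE_SET_REG
     * macros above expect. */
    unsigned long eq   = __vmread(EXIT_QUALIFICATION);
    unsigned long cr   = eq & VMX_CONTROL_REG_ACCESS_NUM;   /* CR number, bits 3:0 */
    unsigned long gp   = eq & VMX_CONTROL_REG_ACCESS_GPR;   /* GPR operand field */
    unsigned long type = eq & VMX_CONTROL_REG_ACCESS_TYPE;  /* MOV to/from CR, CLTS, LMSW */

Note that the LMSW hunk is slightly more than a rename: the old code merged only the low four bits of the source operand into CR0, while the new code passes the full 16-bit operand (bits 31:16 of the exit qualification) to hvm_set_cr0().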
diff -r d686808b3169 -r b5fea3aeb04b xen/include/asm-x86/hvm/io.h
--- a/xen/include/asm-x86/hvm/io.h      Fri Mar 28 11:17:11 2008 +0000
+++ b/xen/include/asm-x86/hvm/io.h      Fri Mar 28 14:12:33 2008 +0000
@@ -25,10 +25,9 @@
 #include <public/hvm/ioreq.h>
 #include <public/event_channel.h>
 
-#define MAX_IO_HANDLER             12
+#define MAX_IO_HANDLER             16
 
 #define HVM_PORTIO                  0
-#define HVM_MMIO                    1
 #define HVM_BUFFERED_IO             2
 
 typedef unsigned long (*hvm_mmio_read_t)(struct vcpu *v,
diff -r d686808b3169 -r b5fea3aeb04b xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Fri Mar 28 11:17:11 2008 +0000
+++ b/xen/include/asm-x86/hvm/support.h Fri Mar 28 14:12:33 2008 +0000
@@ -27,12 +27,6 @@
 #include <asm/regs.h>
 #include <asm/processor.h>
 
-#ifndef NDEBUG
-#define HVM_DEBUG 1
-#else
-#define HVM_DEBUG 1
-#endif
-
 static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -42,17 +36,9 @@ static inline vcpu_iodata_t *get_ioreq(s
     return &p->vcpu_iodata[v->vcpu_id];
 }
 
-/* XXX these are really VMX specific */
-#define TYPE_MOV_TO_DR          (0 << 4)
-#define TYPE_MOV_FROM_DR        (1 << 4)
-#define TYPE_MOV_TO_CR          (0 << 4)
-#define TYPE_MOV_FROM_CR        (1 << 4)
-#define TYPE_CLTS               (2 << 4)
-#define TYPE_LMSW               (3 << 4)
-
 #define HVM_DELIVER_NO_ERROR_CODE  -1
 
-#if HVM_DEBUG
+#ifndef NDEBUG
 #define DBG_LEVEL_0                 (1 << 0)
 #define DBG_LEVEL_1                 (1 << 1)
 #define DBG_LEVEL_2                 (1 << 2)
@@ -131,9 +117,6 @@ enum hvm_copy_result hvm_fetch_from_gues
 enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
     void *buf, unsigned long vaddr, int size, uint32_t pfec);
 
-void hvm_print_line(struct vcpu *v, const char c);
-void hlt_timer_fn(void *data);
-
 #define HVM_HCALL_completed  0 /* hypercall completed - no further action */
 #define HVM_HCALL_preempted  1 /* hypercall preempted - re-execute VMCALL */
 #define HVM_HCALL_invalidate 2 /* invalidate ioemu-dm memory cache        */
diff -r d686808b3169 -r b5fea3aeb04b xen/include/asm-x86/hvm/vmx/vmx.h
--- a/xen/include/asm-x86/hvm/vmx/vmx.h Fri Mar 28 11:17:11 2008 +0000
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h Fri Mar 28 14:12:33 2008 +0000
@@ -95,35 +95,32 @@ void vmx_realmode(struct cpu_user_regs *
 /*
  * Exit Qualifications for MOV for Control Register Access
  */
-#define CONTROL_REG_ACCESS_NUM          0xf     /* 3:0, number of control register */
-#define CONTROL_REG_ACCESS_TYPE         0x30    /* 5:4, access type */
-#define CONTROL_REG_ACCESS_REG          0xf00   /* 10:8, general purpose register */
-#define LMSW_SOURCE_DATA                (0xFFFF << 16)  /* 16:31 lmsw source */
-#define REG_EAX                         (0 << 8)
-#define REG_ECX                         (1 << 8)
-#define REG_EDX                         (2 << 8)
-#define REG_EBX                         (3 << 8)
-#define REG_ESP                         (4 << 8)
-#define REG_EBP                         (5 << 8)
-#define REG_ESI                         (6 << 8)
-#define REG_EDI                         (7 << 8)
-#define REG_R8                          (8 << 8)
-#define REG_R9                          (9 << 8)
-#define REG_R10                         (10 << 8)
-#define REG_R11                         (11 << 8)
-#define REG_R12                         (12 << 8)
-#define REG_R13                         (13 << 8)
-#define REG_R14                         (14 << 8)
-#define REG_R15                         (15 << 8)
-
-/*
- * Exit Qualifications for MOV for Debug Register Access
- */
-#define DEBUG_REG_ACCESS_NUM            0x7     /* 2:0, number of debug register */
-#define DEBUG_REG_ACCESS_TYPE           0x10    /* 4, direction of access */
-#define TYPE_MOV_TO_DR                  (0 << 4)
-#define TYPE_MOV_FROM_DR                (1 << 4)
-#define DEBUG_REG_ACCESS_REG            0xf00   /* 11:8, general purpose register */
+ /* 3:0 - control register number (CRn) */
+#define VMX_CONTROL_REG_ACCESS_NUM      0xf
+ /* 5:4 - access type (CR write, CR read, CLTS, LMSW) */
+#define VMX_CONTROL_REG_ACCESS_TYPE     0x30
+ /* 10:8 - general purpose register operand */
+#define VMX_CONTROL_REG_ACCESS_GPR      0xf00
+#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_TO_CR   (0 << 4)
+#define VMX_CONTROL_REG_ACCESS_TYPE_MOV_FROM_CR (1 << 4)
+#define VMX_CONTROL_REG_ACCESS_TYPE_CLTS        (2 << 4)
+#define VMX_CONTROL_REG_ACCESS_TYPE_LMSW        (3 << 4)
+#define VMX_CONTROL_REG_ACCESS_GPR_EAX  (0 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_ECX  (1 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EDX  (2 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EBX  (3 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_ESP  (4 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EBP  (5 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_ESI  (6 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_EDI  (7 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R8   (8 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R9   (9 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R10  (10 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R11  (11 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R12  (12 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R13  (13 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R14  (14 << 8)
+#define VMX_CONTROL_REG_ACCESS_GPR_R15  (15 << 8)
 
 /*
  * Access Rights
@@ -155,72 +152,72 @@ void vmx_realmode(struct cpu_user_regs *
 
 static inline void __vmptrld(u64 addr)
 {
-    __asm__ __volatile__ ( VMPTRLD_OPCODE
-                           MODRM_EAX_06
-                           /* CF==1 or ZF==1 --> crash (ud2) */
-                           "ja 1f ; ud2 ; 1:\n"
-                           :
-                           : "a" (&addr)
-                           : "memory");
+    asm volatile ( VMPTRLD_OPCODE
+                   MODRM_EAX_06
+                   /* CF==1 or ZF==1 --> crash (ud2) */
+                   "ja 1f ; ud2 ; 1:\n"
+                   :
+                   : "a" (&addr)
+                   : "memory");
 }
 
 static inline void __vmptrst(u64 addr)
 {
-    __asm__ __volatile__ ( VMPTRST_OPCODE
-                           MODRM_EAX_07
-                           :
-                           : "a" (&addr)
-                           : "memory");
+    asm volatile ( VMPTRST_OPCODE
+                   MODRM_EAX_07
+                   :
+                   : "a" (&addr)
+                   : "memory");
 }
 
 static inline void __vmpclear(u64 addr)
 {
-    __asm__ __volatile__ ( VMCLEAR_OPCODE
-                           MODRM_EAX_06
-                           /* CF==1 or ZF==1 --> crash (ud2) */
-                           "ja 1f ; ud2 ; 1:\n"
-                           :
-                           : "a" (&addr)
-                           : "memory");
+    asm volatile ( VMCLEAR_OPCODE
+                   MODRM_EAX_06
+                   /* CF==1 or ZF==1 --> crash (ud2) */
+                   "ja 1f ; ud2 ; 1:\n"
+                   :
+                   : "a" (&addr)
+                   : "memory");
 }
 
 static inline unsigned long __vmread(unsigned long field)
 {
     unsigned long ecx;
 
-    __asm__ __volatile__ ( VMREAD_OPCODE
-                           MODRM_EAX_ECX
-                           /* CF==1 or ZF==1 --> crash (ud2) */
-                           "ja 1f ; ud2 ; 1:\n"
-                           : "=c" (ecx)
-                           : "a" (field)
-                           : "memory");
+    asm volatile ( VMREAD_OPCODE
+                   MODRM_EAX_ECX
+                   /* CF==1 or ZF==1 --> crash (ud2) */
+                   "ja 1f ; ud2 ; 1:\n"
+                   : "=c" (ecx)
+                   : "a" (field)
+                   : "memory");
 
     return ecx;
 }
 
 static inline void __vmwrite(unsigned long field, unsigned long value)
 {
-    __asm__ __volatile__ ( VMWRITE_OPCODE
-                           MODRM_EAX_ECX
-                           /* CF==1 or ZF==1 --> crash (ud2) */
-                           "ja 1f ; ud2 ; 1:\n"
-                           : 
-                           : "a" (field) , "c" (value)
-                           : "memory");
+    asm volatile ( VMWRITE_OPCODE
+                   MODRM_EAX_ECX
+                   /* CF==1 or ZF==1 --> crash (ud2) */
+                   "ja 1f ; ud2 ; 1:\n"
+                   : 
+                   : "a" (field) , "c" (value)
+                   : "memory");
 }
 
 static inline unsigned long __vmread_safe(unsigned long field, int *error)
 {
     unsigned long ecx;
 
-    __asm__ __volatile__ ( VMREAD_OPCODE
-                           MODRM_EAX_ECX
-                           /* CF==1 or ZF==1 --> rc = -1 */
-                           "setna %b0 ; neg %0"
-                           : "=q" (*error), "=c" (ecx)
-                           : "0" (0), "a" (field)
-                           : "memory");
+    asm volatile ( VMREAD_OPCODE
+                   MODRM_EAX_ECX
+                   /* CF==1 or ZF==1 --> rc = -1 */
+                   "setna %b0 ; neg %0"
+                   : "=q" (*error), "=c" (ecx)
+                   : "0" (0), "a" (field)
+                   : "memory");
 
     return ecx;
 }
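
The asm volatile forms above are purely stylistic replacements for the old __asm__ __volatile__ spelling; operands, clobbers and behaviour are unchanged. As a usage note, a sketch of how the error convention of __vmread_safe() is consumed (the wrapper function itself is illustrative only, not something this patch adds):

    /* Sketch: *error stays 0 on success and is set to -1 when the VMREAD
     * fails (CF==1 or ZF==1), via the setna/neg sequence above. */
    static unsigned long vmread_or_zero(unsigned long field)
    {
        int error = 0;
        unsigned long val = __vmread_safe(field, &error);

        return error ? 0 : val;
    }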
