
[Xen-devel] [PATCH] [HVM] fix tracing for 64bit guests



Hi,

Xen tracing used to put values of type 'long' into the trace buffer; some time ago this was changed to uint32_t. Some trace points log virtual addresses, which therefore get cropped to 32 bits. There were inline functions handling at least PF_XEN and VMEXIT properly, but they caused a lot of code duplication. The attached patch fixes several issues:
1. Fix and extend tools/xentrace/formats.
2. Fix xentrace_format to handle up to 7 parameters.
3. Create convenience macros to properly log 'long' values.
4. Remove the inline functions in hvm/trace.h and replace them with macros.
5. Change the CPUID trace to log all four output registers correctly.
6. Group the enable switches for the HVM trace points into classes (see the sketch right after this list).
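
To illustrate point 6: with the grouped defaults the patch introduces in hvm/trace.h (DEFAULT_HVM_REGACCESS, DEFAULT_HVM_TRACE_OFF and friends), a whole class of HVM trace points can be switched off with a one-line change instead of touching every DO_TRC_HVM_* define individually. A hypothetical local tweak (not part of the patch) would look like this:

    /* Silence all register-access trace points (CR/DR/MSR reads and writes)
     * by overriding that group's default; the other groups stay enabled. */
    #define DEFAULT_HVM_REGACCESS  DEFAULT_HVM_TRACE_OFF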

I used an approach similar to the one already used for PV tracing: bit 8 of the event ID (TRC_64_FLAG, 0x100) indicates that the record contains 64-bit pointers.
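
For reference, here is a minimal standalone sketch (not Xen code, just an illustration of the encoding): TRC_PAR_LONG splits a 64-bit value into two consecutive 32-bit trace parameters, and the 64-bit variant of an event gets bit 8 (TRC_64_FLAG) set in its ID, which is what the new entries in tools/xentrace/formats match on (e.g. PF_XEN becomes 0x00082101 instead of 0x00082001):

    #include <stdint.h>
    #include <stdio.h>

    #define TRC_64_FLAG        0x100   /* bit 8 of the event ID, as in the patch */
    #define TRC_PAR_LONG(par)  ((uint32_t)(par)), ((uint32_t)((par) >> 32))

    int main(void)
    {
        uint64_t va = 0xffff830012345678ULL;   /* example 64-bit virtual address */
        uint32_t event = 0x00082001;           /* TRC_HVM_PF_XEN */
        uint32_t d[2] = { TRC_PAR_LONG(va) };  /* low word first, then high word */

        event |= TRC_64_FLAG;                  /* 64-bit flavour: 0x00082101 */
        printf("event 0x%08x  virt = 0x%08x%08x\n", event, d[1], d[0]);
        return 0;
    }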

Discussion welcome!

Signed-off-by: Andre Przywara <andre.przywara@xxxxxxx>

BTW: Why is xentrace_format so awfully slow? I have rewritten the tool in C and got a speedup of more than 20x. If someone bugs me, I will share the code...

Regards,
Andre.

--
Andre Przywara
AMD-Operating System Research Center (OSRC), Dresden, Germany
Tel: +49 351 277-84917
----to satisfy European Law for business letters:
AMD Saxony Limited Liability Company & Co. KG,
Wilschdorfer Landstr. 101, 01109 Dresden, Germany
Register Court Dresden: HRA 4896, General Partner authorized
to represent: AMD Saxony LLC (Wilmington, Delaware, US)
General Manager of AMD Saxony LLC: Dr. Hans-R. Deppe, Thomas McCoy
diff -r b320cfe1f10f tools/xentrace/formats
--- a/tools/xentrace/formats    Thu Jun 05 13:04:07 2008 +0100
+++ b/tools/xentrace/formats    Fri Jun 06 12:04:01 2008 +0200
@@ -1,48 +1,67 @@ 0x00000000  CPU%(cpu)d  %(tsc)d (+%(relt
 0x00000000  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  unknown (0x%(event)016x)  [ 0x%(1)08x 0x%(2)08x 0x%(3)08x 0x%(4)08x 0x%(5)08x ]
 
-0x0001f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  lost_records        0x%(1)08x
-0x0001f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  wrap_buffer         0x%(1)08x
-0x0001f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  cpu_change          0x%(1)08x
+0x0001f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  lost_records      0x%(1)08x
+0x0001f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  wrap_buffer       0x%(1)08x
+0x0001f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  cpu_change        0x%(1)08x
 
-0x0002f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_add_domain       [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_rem_domain       [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  domain_sleep           [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f004  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  domain_wake            [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f005  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  do_yield               [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f006  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  do_block               [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x0002f007  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  domain_shutdown                [ domid = 0x%(1)08x, edomid = 0x%(2)08x, reason = 0x%(3)08x ]
+0x0002f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_add_domain  [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_rem_domain  [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  domain_sleep      [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f004  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  domain_wake       [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f005  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  do_yield          [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f006  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  do_block          [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x0002f007  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  domain_shutdown          [ domid = 0x%(1)08x, edomid = 0x%(2)08x, reason = 0x%(3)08x ]
 0x0002f008  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_ctl
-0x0002f009  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_adjdom           [ domid = 0x%(1)08x ]
-0x0002f00a  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  __enter_scheduler      [ prev<domid:edomid> = 0x%(1)08x : 0x%(2)08x, next<domid:edomid> = 0x%(3)08x : 0x%(4)08x ]
+0x0002f009  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  sched_adjdom      [ domid = 0x%(1)08x ]
+0x0002f00a  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  __enter_scheduler [ prev<domid:edomid> = 0x%(1)08x : 0x%(2)08x, next<domid:edomid> = 0x%(3)08x : 0x%(4)08x ]
 0x0002f00B  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  s_timer_fn
 0x0002f00c  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  t_timer_fn
 0x0002f00d  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  dom_timer_fn
+0x0002f00e  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  switch_infprev    [ old_domid = 0x%(1)08x, runtime = %(2)d ]
+0x0002f00f  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  switch_infnext    [ new_domid = 0x%(1)08x, time = %(2)d, r_time = %(3)d ]
 
 0x00081001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMENTRY     [ dom:vcpu = 0x%(1)08x ]
 0x00081002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMEXIT      [ dom:vcpu = 0x%(1)08x, exitcode = 0x%(2)08x, rIP  = 0x%(3)08x ]
-0x00082001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_XEN      [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
-0x00082002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_INJECT   [ dom:vcpu = 0x%(1)08x, virt = 0x%(2)08x, errorcode = 0x%(3)02x ]
+0x00081102  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMEXIT      [ dom:vcpu = 0x%(1)08x, exitcode = 0x%(2)08x, rIP  = 0x%(3)016x ]
+0x00082001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_XEN      [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(3)02x, virt = 0x%(2)08x ]
+0x00082101  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_XEN      [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(3)02x, virt = 0x%(2)016x ]
+0x00082002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_INJECT   [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
+0x00082102  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_INJECT   [ dom:vcpu = 0x%(1)08x,  errorcode = 0x%(2)02x, virt = 0x%(3)016x ]
 0x00082003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INJ_EXC     [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x, errorcode = 0x%(3)04x ]
 0x00082004  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INJ_VIRQ    [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x, fake = %(3)d ]
 0x00082005  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  REINJ_VIRQ  [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x ]
 0x00082006  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  IO_READ     [ dom:vcpu = 0x%(1)08x, port = 0x%(2)04x, size = %(3)d ]
 0x00082007  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  IO_WRITE    [ dom:vcpu = 0x%(1)08x, port = 0x%(2)04x, size = %(3)d ]
 0x00082008  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CR_READ     [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)08x ]
+0x00082108  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CR_READ     [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)016x ]
 0x00082009  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CR_WRITE    [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)08x ]
+0x00082109  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CR_WRITE    [ dom:vcpu = 0x%(1)08x, CR# = %(2)d, value = 0x%(3)016x ]
 0x0008200A  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  DR_READ     [ dom:vcpu = 0x%(1)08x ]
 0x0008200B  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  DR_WRITE    [ dom:vcpu = 0x%(1)08x ]
-0x0008200C  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  MSR_READ    [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)08x ]
-0x0008200D  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  MSR_WRITE   [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)08x ]
-0x0008200E  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CPUID       [ dom:vcpu = 0x%(1)08x, func = 0x%(2)08x, eax:ebx = 0x%(3)016x, ecx:edx = 0x%(4)016x ]
+0x0008200C  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  MSR_READ    [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)016x ]
+0x0008200D  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  MSR_WRITE   [ dom:vcpu = 0x%(1)08x, MSR# = 0x%(2)08x, value = 0x%(3)016x ]
+0x0008200E  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CPUID       [ dom:vcpu = 0x%(1)08x, func = 0x%(2)08x, eax = 0x%(3)08x, ebx = 0x%(4)08x, ecx=0x%(5)08x, edx = 0x%(6)08x ]
 0x0008200F  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INTR        [ dom:vcpu = 0x%(1)08x, vector = 0x%(2)02x ]
 0x00082010  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  NMI         [ dom:vcpu = 0x%(1)08x ]
 0x00082011  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  SMI         [ dom:vcpu = 0x%(1)08x ]
 0x00082012  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  VMMCALL     [ dom:vcpu = 0x%(1)08x, func = 0x%(2)08x ]
 0x00082013  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  HLT         [ dom:vcpu = 0x%(1)08x, intpending = %(2)d ]
-0x00082014  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INVLPG      [ dom:vcpu = 0x%(1)08x, virt = 0x%(2)08x, invlpga = %(3)d, asid = 0x%(4)02x ]
+0x00082014  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INVLPG      [ dom:vcpu = 0x%(1)08x, is invlpga? = %(2)d, virt = 0x%(3)08x ]
+0x00082114  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  INVLPG      [ dom:vcpu = 0x%(1)08x, is invlpga? = %(2)d, virt = 0x%(3)016x ]
 0x00082015  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  MCE         [ dom:vcpu = 0x%(1)08x ]
 0x00082016  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  IO_ASSIST   [ dom:vcpu = 0x%(1)08x, data = 0x%(2)04x ]
 0x00082017  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  MMIO_ASSIST [ dom:vcpu = 0x%(1)08x, data = 0x%(2)04x ]
 0x00082018  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  CLTS        [ dom:vcpu = 0x%(1)08x ]
 0x00082019  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  LMSW        [ dom:vcpu = 0x%(1)08x, value = 0x%(2)08x ]
-0x00082020  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  PF_XEN64    [ dom:vcpu = 0x%(1)08x, errorcode = 0x%(2)02x, virt = 0x%(3)08x ]
+0x00082119  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  LMSW        [ dom:vcpu = 0x%(1)08x, value = 0x%(2)016x ]
+
+0x0010f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_map      [ domid = %(1)d ]
+0x0010f002  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_unmap    [ domid = %(1)d ]
+0x0010f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_grant_transfer [ domid = %(1)d ]
+
+0x0020f001  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  hypercall  [ eip = 0x%(1)08x, eax = 0x%(2)08x ]
+0x0020f101  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  hypercall  [ rip = 0x%(1)016x, eax = 0x%(2)08x ]
+0x0020f003  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  trap       [ eip = 0x%(1)08x, trapnr:error = 0x%(2)08x ]
+0x0020f103  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  trap       [ rip = 0x%(1)016x, trapnr:error = 0x%(2)08x ]
+0x0020f004  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_fault [ eip = 0x%(1)08x, addr = 0x%(2)08x, error = 0x%(3)08x ]
+0x0020f104  CPU%(cpu)d  %(tsc)d (+%(reltsc)8d)  page_fault [ rip = 0x%(1)16x, addr = 0x%(3)16x, error = 0x%(5)08x ]
diff -r b320cfe1f10f tools/xentrace/xentrace_format
--- a/tools/xentrace/xentrace_format    Thu Jun 05 13:04:07 2008 +0100
+++ b/tools/xentrace/xentrace_format    Fri Jun 06 12:04:01 2008 +0200
@@ -17,12 +17,12 @@ def usage():
           {event_id}{whitespace}{text format string}
 
           The textual format string may include format specifiers, such as:
-            %(cpu)d, %(tsc)d, %(event)d, %(1)d, %(2)d, %(3)d, %(4)d, %(5)d
+            %(cpu)d, %(tsc)d, %(event)d, %(1)d, %(2)d, %(3)d, %(4)d, ... 
           [ the 'd' format specifier outputs in decimal, alternatively 'x'
             will output in hexadecimal and 'o' will output in octal ]
 
           Which correspond to the CPU number, event ID, timestamp counter and
-          the 5 data fields from the trace record.  There should be one such
+          the 7 data fields from the trace record.  There should be one such
           rule for each type of event.
           
           Depending on your system and the volume of trace buffer data,
@@ -84,7 +84,7 @@ defs = read_defs(arg[0])
 defs = read_defs(arg[0])
 
 # structure of trace record (as output by xentrace):
-# HDR(I) {TSC(Q)} D1(I) D2(I) D3(I) D4(I) D5(I)
+# HDR(I) {TSC(Q)} D1(I) D2(I) D3(I) D4(I) D5(I) D6(I) D7(I)
 #
 # HDR consists of EVENT:28:, n_data:3:, tsc_in:1:
 # EVENT means Event ID
@@ -101,6 +101,8 @@ D3REC  = "III"
 D3REC  = "III"
 D4REC  = "IIII"
 D5REC  = "IIIII"
+D6REC  = "IIIIII"
+D7REC  = "IIIIIII"
 
 last_tsc = [0]
 
@@ -121,6 +123,8 @@ while not interrupted:
         d3 = 0
         d4 = 0
         d5 = 0
+        d6 = 0
+        d7 = 0
 
         tsc = 0
 
@@ -155,6 +159,16 @@ while not interrupted:
             if not line:
                 break
             (d1, d2, d3, d4, d5) = struct.unpack(D5REC, line)
+        if n_data == 6:
+            line = sys.stdin.read(struct.calcsize(D6REC))
+            if not line:
+                break
+            (d1, d2, d3, d4, d5, d6) = struct.unpack(D6REC, line)
+        if n_data == 7:
+            line = sys.stdin.read(struct.calcsize(D7REC))
+            if not line:
+                break
+            (d1, d2, d3, d4, d5, d6, d7) = struct.unpack(D7REC, line)
 
         # Event field is 28bit of 'uint32_t' in header, not 'long'.
         event &= 0x0fffffff
@@ -191,7 +205,9 @@ while not interrupted:
                 '2'     : d2,
                 '3'     : d3,
                 '4'     : d4,
-                '5'     : d5    }
+                '5'     : d5,
+                '6'     : d6,
+                '7'     : d7    }
 
         try:
 
diff -r b320cfe1f10f xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Thu Jun 05 13:04:07 2008 +0100
+++ b/xen/arch/x86/hvm/svm/svm.c        Fri Jun 06 12:04:01 2008 +0200
@@ -757,7 +757,7 @@ static void svm_inject_exception(
     if ( trapnr == TRAP_page_fault )
     {
         vmcb->cr2 = curr->arch.hvm_vcpu.guest_cr[2] = cr2;
-        HVMTRACE_2D(PF_INJECT, curr, curr->arch.hvm_vcpu.guest_cr[2], errcode);
+        HVMTRACE_LONG_2D(PF_INJECT, curr, errcode, TRC_PAR_LONG(cr2));
     }
     else
     {
@@ -914,8 +914,7 @@ static void svm_cpuid_intercept(
             __clear_bit(X86_FEATURE_APIC & 31, edx);
     }
 
-    HVMTRACE_3D(CPUID, v, input,
-                ((uint64_t)*eax << 32) | *ebx, ((uint64_t)*ecx << 32) | *edx);
+    HVMTRACE_5D (CPUID, v, input, *eax, *ebx, *ecx, *edx);
 }
 
 static void svm_vmexit_do_cpuid(struct cpu_user_regs *regs)
@@ -1014,7 +1013,7 @@ static int svm_msr_read_intercept(struct
     regs->edx = msr_content >> 32;
 
  done:
-    hvmtrace_msr_read(v, ecx, msr_content);
+    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax, (unsigned long)regs->edx);
     return X86EMUL_OKAY;
@@ -1033,7 +1032,7 @@ static int svm_msr_write_intercept(struc
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    hvmtrace_msr_write(v, ecx, msr_content);
+    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
 
     switch ( ecx )
     {
@@ -1153,7 +1152,7 @@ static void svm_invlpg_intercept(unsigne
 static void svm_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
-    HVMTRACE_2D(INVLPG, curr, 0, vaddr);
+    HVMTRACE_LONG_2D(INVLPG, curr, 0, TRC_PAR_LONG(vaddr));
     paging_invlpg(curr, vaddr);
     svm_asid_g_invlpg(curr, vaddr);
 }
@@ -1176,7 +1175,12 @@ asmlinkage void svm_vmexit_handler(struc
 
     exit_reason = vmcb->exitcode;
 
-    hvmtrace_vmexit(v, regs->eip, exit_reason);
+    if ( hvm_long_mode_enabled(v) )
+        HVMTRACE_ND (VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+            regs->eip & 0xFFFFFFFF, regs->eip >> 32, 0, 0, 0);
+    else
+        HVMTRACE_ND (VMEXIT, 1/*cycles*/, v, 2, exit_reason,
+            regs->eip, 0, 0, 0, 0);
 
     if ( unlikely(exit_reason == VMEXIT_INVALID) )
     {
@@ -1244,7 +1248,10 @@ asmlinkage void svm_vmexit_handler(struc
 
         if ( paging_fault(va, regs) )
         {
-            HVMTRACE_2D(PF_XEN, v, va, regs->error_code);
+            if (hvm_long_mode_enabled(v))
+                HVMTRACE_LONG_2D(PF_XEN, v, regs->error_code, TRC_PAR_LONG(va));
+            else
+                HVMTRACE_2D(PF_XEN, v, regs->error_code, va);
             break;
         }
 
@@ -1382,7 +1389,7 @@ asmlinkage void svm_vmexit_handler(struc
 
 asmlinkage void svm_trace_vmentry(void)
 {
-    hvmtrace_vmentry(current);
+    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
 }
   
 /*
diff -r b320cfe1f10f xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Thu Jun 05 13:04:07 2008 +0100
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Fri Jun 06 12:04:01 2008 +0200
@@ -1107,7 +1107,8 @@ static void __vmx_inject_exception(
     __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
 
     if ( trap == TRAP_page_fault )
-        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
+        HVMTRACE_LONG_2D(PF_INJECT, v, error_code,
+            TRC_PAR_LONG(v->arch.hvm_vcpu.guest_cr[2]));
     else
         HVMTRACE_2D(INJ_EXC, v, trap, error_code);
 }
@@ -1328,8 +1329,7 @@ static void vmx_cpuid_intercept(
             break;
     }
 
-    HVMTRACE_3D(CPUID, current, input,
-                ((uint64_t)*eax << 32) | *ebx, ((uint64_t)*ecx << 32) | *edx);
+    HVMTRACE_5D (CPUID, current, input, *eax, *ebx, *ecx, *edx);
 }
 
 static void vmx_do_cpuid(struct cpu_user_regs *regs)
@@ -1367,7 +1367,7 @@ static void vmx_invlpg_intercept(unsigne
 static void vmx_invlpg_intercept(unsigned long vaddr)
 {
     struct vcpu *curr = current;
-    HVMTRACE_2D(INVLPG, curr, /*invlpga=*/ 0, vaddr);
+    HVMTRACE_LONG_2D(INVLPG, curr, /*invlpga=*/ 0, TRC_PAR_LONG(vaddr));
     if ( paging_invlpg(curr, vaddr) )
         vpid_sync_vcpu_gva(curr, vaddr);
 }
@@ -1418,7 +1418,7 @@ static int mov_to_cr(int gp, int cr, str
         goto exit_and_crash;
     }
 
-    HVMTRACE_2D(CR_WRITE, v, cr, value);
+    HVMTRACE_LONG_2D(CR_WRITE, v, cr, TRC_PAR_LONG(value));
 
     HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
 
@@ -1489,7 +1489,7 @@ static void mov_from_cr(int cr, int gp, 
         break;
     }
 
-    HVMTRACE_2D(CR_READ, v, cr, value);
+    HVMTRACE_LONG_2D(CR_READ, v, cr, TRC_PAR_LONG(value));
 
     HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR%d, value = %lx", cr, value);
 }
@@ -1520,7 +1520,7 @@ static int vmx_cr_access(unsigned long e
     case VMX_CONTROL_REG_ACCESS_TYPE_LMSW:
         value = v->arch.hvm_vcpu.guest_cr[0];
         value = (value & ~0xFFFF) | ((exit_qualification >> 16) & 0xFFFF);
-        HVMTRACE_1D(LMSW, current, value);
+        HVMTRACE_LONG_1D(LMSW, current, value);
         return !hvm_set_cr0(value);
     default:
         BUG();
@@ -1675,7 +1675,7 @@ static int vmx_msr_read_intercept(struct
     regs->edx = (uint32_t)(msr_content >> 32);
 
 done:
-    hvmtrace_msr_read(v, ecx, msr_content);
+    HVMTRACE_3D (MSR_READ, v, ecx, regs->eax, regs->edx);
     HVM_DBG_LOG(DBG_LEVEL_1, "returns: ecx=%x, eax=%lx, edx=%lx",
                 ecx, (unsigned long)regs->eax,
                 (unsigned long)regs->edx);
@@ -1786,7 +1786,7 @@ static int vmx_msr_write_intercept(struc
 
     msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
 
-    hvmtrace_msr_write(v, ecx, msr_content);
+    HVMTRACE_3D (MSR_WRITE, v, ecx, regs->eax, regs->edx);
 
     switch ( ecx )
     {
@@ -2020,7 +2020,12 @@ asmlinkage void vmx_vmexit_handler(struc
 
     exit_reason = __vmread(VM_EXIT_REASON);
 
-    hvmtrace_vmexit(v, regs->eip, exit_reason);
+    if ( hvm_long_mode_enabled(v) )
+        HVMTRACE_ND (VMEXIT64, 1/*cycles*/, v, 3, exit_reason,
+            regs->eip & 0xFFFFFFFF, regs->eip >> 32, 0, 0, 0);
+    else
+        HVMTRACE_ND (VMEXIT, 1/*cycles*/, v, 2, exit_reason,
+            regs->eip, 0, 0, 0, 0);
 
     perfc_incra(vmexits, exit_reason);
 
@@ -2109,7 +2114,12 @@ asmlinkage void vmx_vmexit_handler(struc
 
             if ( paging_fault(exit_qualification, regs) )
             {
-                hvmtrace_pf_xen(v, exit_qualification, regs->error_code);
+                if ( hvm_long_mode_enabled(v) )
+                    HVMTRACE_LONG_2D (PF_XEN, v, regs->error_code,
+                        TRC_PAR_LONG(exit_qualification) );
+                else
+                    HVMTRACE_2D (PF_XEN, v,
+                        regs->error_code, exit_qualification );
                 break;
             }
 
@@ -2271,7 +2281,7 @@ asmlinkage void vmx_vmexit_handler(struc
 
 asmlinkage void vmx_trace_vmentry(void)
 {
-    hvmtrace_vmentry(current);
+    HVMTRACE_ND (VMENTRY, 1/*cycles*/, current, 0, 0, 0, 0, 0, 0, 0);
 }
 
 /*
diff -r b320cfe1f10f xen/arch/x86/trace.c
--- a/xen/arch/x86/trace.c      Thu Jun 05 13:04:07 2008 +0100
+++ b/xen/arch/x86/trace.c      Fri Jun 06 12:04:01 2008 +0200
@@ -7,8 +7,8 @@
 #include <xen/trace.h>
 
 #ifndef __x86_64__
-#undef TRC_PV_64_FLAG
-#define TRC_PV_64_FLAG 0
+#undef TRC_64_FLAG
+#define TRC_64_FLAG 0
 #endif
 
 asmlinkage void trace_hypercall(void)
@@ -38,7 +38,7 @@ asmlinkage void trace_hypercall(void)
         u32 event;
 
         event = TRC_PV_HYPERCALL;
-        event |= TRC_PV_64_FLAG;
+        event |= TRC_64_FLAG;
         d.eip = regs->eip;
         d.eax = regs->eax;
 
@@ -84,7 +84,7 @@ void __trace_pv_trap(int trapnr, unsigne
         d.use_error_code=!!use_error_code;
                 
         event = TRC_PV_TRAP;
-        event |= TRC_PV_64_FLAG;
+        event |= TRC_64_FLAG;
         __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
     }
 }
@@ -119,7 +119,7 @@ void __trace_pv_page_fault(unsigned long
         d.addr = addr;
         d.error_code = error_code;
         event = TRC_PV_PAGE_FAULT;
-        event |= TRC_PV_64_FLAG;
+        event |= TRC_64_FLAG;
         __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
     }
 }
@@ -135,7 +135,7 @@ void __trace_trap_one_addr(unsigned even
     else
 #endif        
     {
-        event |= TRC_PV_64_FLAG;
+        event |= TRC_64_FLAG;
         __trace_var(event, 1, sizeof(va), (unsigned char *)&va);
     }
 }
@@ -161,7 +161,7 @@ void __trace_trap_two_addr(unsigned even
         } __attribute__((packed)) d;
         d.va1=va1;
         d.va2=va2;
-        event |= TRC_PV_64_FLAG;
+        event |= TRC_64_FLAG;
         __trace_var(event, 1, sizeof(d), (unsigned char *)&d);
     }
 }
@@ -207,7 +207,7 @@ void __trace_ptwr_emulation(unsigned lon
 
         event = ((CONFIG_PAGING_LEVELS == 3) ?
                  TRC_PV_PTWR_EMULATION_PAE : TRC_PV_PTWR_EMULATION);
-        event |= TRC_PV_64_FLAG;
+        event |= TRC_64_FLAG;
         __trace_var(event, 1/*tsc*/, sizeof(d), (unsigned char *)&d);
     }
 }
diff -r b320cfe1f10f xen/include/asm-x86/hvm/trace.h
--- a/xen/include/asm-x86/hvm/trace.h   Thu Jun 05 13:04:07 2008 +0100
+++ b/xen/include/asm-x86/hvm/trace.h   Fri Jun 06 12:04:01 2008 +0200
@@ -3,173 +3,66 @@
 
 #include <xen/trace.h>
 
-#define DO_TRC_HVM_VMENTRY     1
-#define DO_TRC_HVM_VMEXIT      1
-#define DO_TRC_HVM_PF_XEN      1
-#define DO_TRC_HVM_PF_INJECT   1
-#define DO_TRC_HVM_INJ_EXC     1
-#define DO_TRC_HVM_INJ_VIRQ    1
-#define DO_TRC_HVM_REINJ_VIRQ  1
-#define DO_TRC_HVM_IO_READ     1
-#define DO_TRC_HVM_IO_WRITE    1
-#define DO_TRC_HVM_CR_READ     1
-#define DO_TRC_HVM_CR_WRITE    1
-#define DO_TRC_HVM_DR_READ     1
-#define DO_TRC_HVM_DR_WRITE    1
-#define DO_TRC_HVM_MSR_READ    1
-#define DO_TRC_HVM_MSR_WRITE   1
-#define DO_TRC_HVM_CPUID       1
-#define DO_TRC_HVM_INTR        1
-#define DO_TRC_HVM_NMI         1
-#define DO_TRC_HVM_MCE         1
-#define DO_TRC_HVM_SMI         1
-#define DO_TRC_HVM_VMMCALL     1
-#define DO_TRC_HVM_HLT         1
-#define DO_TRC_HVM_INVLPG      1
-#define DO_TRC_HVM_IO_ASSIST   1
-#define DO_TRC_HVM_MMIO_ASSIST 1
-#define DO_TRC_HVM_CLTS        1
-#define DO_TRC_HVM_LMSW        1
+#define DEFAULT_HVM_TRACE_ON  1
+#define DEFAULT_HVM_TRACE_OFF 0
 
-static inline void hvmtrace_vmexit(struct vcpu *v,
-                                   unsigned long rip,
-                                   unsigned long exit_reason)
-{
-    if ( likely(!tb_init_done) )
-        return;
+#define DEFAULT_HVM_VMSWITCH   DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_PF         DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_INJECT     DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_IO         DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_REGACCESS  DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_MISC       DEFAULT_HVM_TRACE_ON
+#define DEFAULT_HVM_INTR       DEFAULT_HVM_TRACE_ON
+
+#define DO_TRC_HVM_VMENTRY     DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_VMEXIT      DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_VMEXIT64    DEFAULT_HVM_VMSWITCH
+#define DO_TRC_HVM_PF_XEN      DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_XEN64    DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_INJECT   DEFAULT_HVM_PF
+#define DO_TRC_HVM_PF_INJECT64 DEFAULT_HVM_PF
+#define DO_TRC_HVM_INJ_EXC     DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_INJ_VIRQ    DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_REINJ_VIRQ  DEFAULT_HVM_INJECT
+#define DO_TRC_HVM_IO_READ     DEFAULT_HVM_IO
+#define DO_TRC_HVM_IO_WRITE    DEFAULT_HVM_IO
+#define DO_TRC_HVM_CR_READ     DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_READ64   DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_WRITE    DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CR_WRITE64  DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_DR_READ     DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_DR_WRITE    DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_MSR_READ    DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_MSR_WRITE   DEFAULT_HVM_REGACCESS
+#define DO_TRC_HVM_CPUID       DEFAULT_HVM_MISC
+#define DO_TRC_HVM_INTR        DEFAULT_HVM_INTR
+#define DO_TRC_HVM_NMI         DEFAULT_HVM_INTR
+#define DO_TRC_HVM_MCE         DEFAULT_HVM_INTR
+#define DO_TRC_HVM_SMI         DEFAULT_HVM_INTR
+#define DO_TRC_HVM_VMMCALL     DEFAULT_HVM_MISC
+#define DO_TRC_HVM_HLT         DEFAULT_HVM_MISC
+#define DO_TRC_HVM_INVLPG      DEFAULT_HVM_MISC
+#define DO_TRC_HVM_INVLPG64    DEFAULT_HVM_MISC
+#define DO_TRC_HVM_IO_ASSIST   DEFAULT_HVM_MISC
+#define DO_TRC_HVM_MMIO_ASSIST DEFAULT_HVM_MISC
+#define DO_TRC_HVM_CLTS        DEFAULT_HVM_MISC
+#define DO_TRC_HVM_LMSW        DEFAULT_HVM_MISC
+#define DO_TRC_HVM_LMSW64      DEFAULT_HVM_MISC
+
 
 #ifdef __x86_64__
-    if ( hvm_long_mode_enabled(v) )
-    {
-        struct {
-            unsigned did:16, vid:16;
-            unsigned exit_reason:32;
-            u64 rip;
-        } d;
+#define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32)
+#else
+#define TRC_PAR_LONG(par) (par)
+#endif
 
-        d.did = v->domain->domain_id;
-        d.vid = v->vcpu_id;
-        d.exit_reason = exit_reason;
-        d.rip = rip;
-        __trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d),
-                    (unsigned char *)&d);
-    }
-    else
-#endif
-    {
-        struct {
-            unsigned did:16, vid:16;
-            unsigned exit_reason:32;
-            u32 eip;
-        } d;
-
-        d.did = v->domain->domain_id;
-        d.vid = v->vcpu_id;
-        d.exit_reason = exit_reason;
-        d.eip = rip;
-        __trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d),
-                    (unsigned char *)&d);
-    }
-}
-
-
-static inline void hvmtrace_vmentry(struct vcpu *v)
-{
-    struct {
-        unsigned did:16, vid:16;
-    } d;
-
-    if ( likely(!tb_init_done) )
-        return;
-
-    d.did = v->domain->domain_id;
-    d.vid = v->vcpu_id;
-    __trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d);
-}
-
-static inline void hvmtrace_msr_read(struct vcpu *v, u32 ecx, u64 msr_content)
-{
-    struct {
-        unsigned did:16, vid:16;
-        u32 ecx;
-        u64 msr_content;
-    } d;
-
-    if ( likely(!tb_init_done) )
-        return;
-
-    d.did = v->domain->domain_id;
-    d.vid = v->vcpu_id;
-    d.ecx = ecx;
-    d.msr_content = msr_content;
-    __trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d),
-                (unsigned char *)&d);
-}
-
-static inline void hvmtrace_msr_write(struct vcpu *v, u32 ecx, u64 msr_content)
-{
-    struct {
-        unsigned did:16, vid:16;
-        u32 ecx;
-        u64 msr_content;
-    } d;
-
-    if ( likely(!tb_init_done) )
-        return;
-
-    d.did = v->domain->domain_id;
-    d.vid = v->vcpu_id;
-    d.ecx = ecx;
-    d.msr_content = msr_content;
-    __trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d),
-                (unsigned char *)&d);
-}
-
-static inline void hvmtrace_pf_xen(struct vcpu *v, unsigned long va,
-                                   u32 error_code)
-{
-    if ( likely(!tb_init_done) )
-        return;
-
-#ifdef __x86_64__
-    if( hvm_long_mode_enabled(v) )
-    {
-        struct {
-            unsigned did:16, vid:16;
-            u32 error_code;
-            u64 va;
-        } d;
-        d.did = v->domain->domain_id;
-        d.vid = v->vcpu_id;
-        d.error_code = error_code;
-        d.va = va;
-        __trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d),
-                    (unsigned char *)&d);
-    }
-    else
-#endif
-    {
-        struct {
-            unsigned did:16, vid:16;
-            u32 error_code;
-            u32 va;
-        } d;
-        d.did = v->domain->domain_id;
-        d.vid = v->vcpu_id;
-        d.error_code = error_code;
-        d.va = va;
-        __trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d),
-                    (unsigned char *)&d);
-    }
-}
-
-#define HVMTRACE_ND(evt, vcpu, count, d1, d2, d3, d4)                   \
+#define HVMTRACE_ND(evt, cycles, vcpu, count, d1, d2, d3, d4, d5, d6)   \
     do {                                                                \
         if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt )             \
         {                                                               \
             struct {                                                    \
-                unsigned did:16, vid:16;                                \
-                u32 d[4];                                               \
+                u32 did:16, vid:16;                                     \
+                u32 d[6];                                               \
             } _d;                                                       \
             _d.did=(vcpu)->domain->domain_id;                           \
             _d.vid=(vcpu)->vcpu_id;                                     \
@@ -177,16 +70,45 @@ static inline void hvmtrace_pf_xen(struc
             _d.d[1]=(d2);                                               \
             _d.d[2]=(d3);                                               \
             _d.d[3]=(d4);                                               \
-            __trace_var(TRC_HVM_ ## evt, 0/*!cycles*/,                  \
+            _d.d[4]=(d5);                                               \
+            _d.d[5]=(d6);                                               \
+            __trace_var(TRC_HVM_ ## evt, cycles,                        \
                         sizeof(u32)*count+1, (unsigned char *)&_d);     \
         }                                                               \
     } while(0)
 
-#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4)   HVMTRACE_ND(evt, vcpu, 4, d1, d2, d3,  d4)
-#define HVMTRACE_3D(evt, vcpu, d1, d2, d3)       HVMTRACE_ND(evt, vcpu, 3, d1, d2, d3,  0)
-#define HVMTRACE_2D(evt, vcpu, d1, d2)           HVMTRACE_ND(evt, vcpu, 2, d1, d2,  0,  0)
-#define HVMTRACE_1D(evt, vcpu, d1)               HVMTRACE_ND(evt, vcpu, 1, d1,  0,  0,  0)
-#define HVMTRACE_0D(evt, vcpu)                   HVMTRACE_ND(evt, vcpu, 0, 0,  0,  0,  0)
+#define HVMTRACE_6D(evt, vcpu, d1, d2, d3, d4, d5, d6)    \
+                      HVMTRACE_ND(evt, 0, vcpu, 6, d1, d2, d3,  d4, d5, d6)
+#define HVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5)        \
+                      HVMTRACE_ND(evt, 0, vcpu, 5, d1, d2, d3,  d4, d5, 0)
+#define HVMTRACE_4D(evt, vcpu, d1, d2, d3, d4)               \
+                      HVMTRACE_ND(evt, 0, vcpu, 4, d1, d2, d3,  d4, 0, 0)
+#define HVMTRACE_3D(evt, vcpu, d1, d2, d3)                   \
+                      HVMTRACE_ND(evt, 0, vcpu, 3, d1, d2, d3,  0, 0, 0)
+#define HVMTRACE_2D(evt, vcpu, d1, d2)                       \
+                      HVMTRACE_ND(evt, 0, vcpu, 2, d1, d2,  0,  0, 0, 0)
+#define HVMTRACE_1D(evt, vcpu, d1)                           \
+                      HVMTRACE_ND(evt, 0, vcpu, 1, d1,  0,  0,  0, 0, 0)
+#define HVMTRACE_0D(evt, vcpu)                               \
+                      HVMTRACE_ND(evt, 0, vcpu, 0, 0,  0,  0,  0, 0, 0)
+
+
+
+#ifdef __x86_64__
+#define HVMTRACE_LONG_1D(evt, vcpu, d1)                  \
+                   HVMTRACE_2D(evt ## 64, vcpu, (d1) & 0xFFFFFFFF, (d1) >> 32)
+#define HVMTRACE_LONG_2D(evt,vcpu,d1,d2, ...)              \
+                   HVMTRACE_3D(evt ## 64, vcpu, d1, d2)
+#define HVMTRACE_LONG_3D(evt, vcpu, d1, d2, d3, ...)      \
+                   HVMTRACE_4D(evt ## 64, vcpu, d1, d2, d3)
+#define HVMTRACE_LONG_4D(evt, vcpu, d1, d2, d3, d4, ...)  \
+                   HVMTRACE_5D(evt ## 64, vcpu, d1, d2, d3, d4)
+#else
+#define HVMTRACE_LONG_1D HVMTRACE_1D
+#define HVMTRACE_LONG_2D HVMTRACE_2D
+#define HVMTRACE_LONG_3D HVMTRACE_3D
+#define HVMTRACE_LONG_4D HVMTRACE_4D
+#endif
 
 #endif /* __ASM_X86_HVM_TRACE_H__ */
 
diff -r b320cfe1f10f xen/include/public/trace.h
--- a/xen/include/public/trace.h        Thu Jun 05 13:04:07 2008 +0100
+++ b/xen/include/public/trace.h        Fri Jun 06 12:04:01 2008 +0200
@@ -87,21 +87,25 @@
 #define TRC_PV_PTWR_EMULATION        (TRC_PV + 11)
 #define TRC_PV_PTWR_EMULATION_PAE    (TRC_PV + 12)
   /* Indicates that addresses in trace record are 64 bits */
-#define TRC_PV_64_FLAG               (0x100) 
+#define TRC_64_FLAG               (0x100) 
 
 /* trace events per subclass */
 #define TRC_HVM_VMENTRY         (TRC_HVM_ENTRYEXIT + 0x01)
 #define TRC_HVM_VMEXIT          (TRC_HVM_ENTRYEXIT + 0x02)
-#define TRC_HVM_VMEXIT64        (TRC_HVM_ENTRYEXIT + 0x03)
+#define TRC_HVM_VMEXIT64        (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
 #define TRC_HVM_PF_XEN          (TRC_HVM_HANDLER + 0x01)
+#define TRC_HVM_PF_XEN64        (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
 #define TRC_HVM_PF_INJECT       (TRC_HVM_HANDLER + 0x02)
+#define TRC_HVM_PF_INJECT64     (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
 #define TRC_HVM_INJ_EXC         (TRC_HVM_HANDLER + 0x03)
 #define TRC_HVM_INJ_VIRQ        (TRC_HVM_HANDLER + 0x04)
 #define TRC_HVM_REINJ_VIRQ      (TRC_HVM_HANDLER + 0x05)
 #define TRC_HVM_IO_READ         (TRC_HVM_HANDLER + 0x06)
 #define TRC_HVM_IO_WRITE        (TRC_HVM_HANDLER + 0x07)
 #define TRC_HVM_CR_READ         (TRC_HVM_HANDLER + 0x08)
+#define TRC_HVM_CR_READ64       (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
 #define TRC_HVM_CR_WRITE        (TRC_HVM_HANDLER + 0x09)
+#define TRC_HVM_CR_WRITE64      (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
 #define TRC_HVM_DR_READ         (TRC_HVM_HANDLER + 0x0A)
 #define TRC_HVM_DR_WRITE        (TRC_HVM_HANDLER + 0x0B)
 #define TRC_HVM_MSR_READ        (TRC_HVM_HANDLER + 0x0C)
@@ -113,12 +117,13 @@
 #define TRC_HVM_VMMCALL         (TRC_HVM_HANDLER + 0x12)
 #define TRC_HVM_HLT             (TRC_HVM_HANDLER + 0x13)
 #define TRC_HVM_INVLPG          (TRC_HVM_HANDLER + 0x14)
+#define TRC_HVM_INVLPG64        (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
 #define TRC_HVM_MCE             (TRC_HVM_HANDLER + 0x15)
 #define TRC_HVM_IO_ASSIST       (TRC_HVM_HANDLER + 0x16)
 #define TRC_HVM_MMIO_ASSIST     (TRC_HVM_HANDLER + 0x17)
 #define TRC_HVM_CLTS            (TRC_HVM_HANDLER + 0x18)
 #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
-#define TRC_HVM_PF_XEN64        (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
 
 /* This structure represents a single trace buffer record. */
 struct t_rec {