
[Xen-changelog] [xen master] x86/traps: Drop use_error_code parameter from do_{,guest_}trap()



commit 2e426d6eecfd358b6a78553e63fcb24548010537
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Wed Aug 3 16:56:56 2016 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Mon Aug 8 12:52:56 2016 +0100

    x86/traps: Drop use_error_code parameter from do_{,guest_}trap()
    
    Whether or not an error code is needed can be determined entirely from the
    trapnr parameter, as error codes are architecturally specified.
    
    Introduce TRAP_HAVE_EC as a bitmap of reserved vectors which have error
    codes, and drop the use_error_code parameter from all callsites.
    
    As a result, the DO_ERROR{,_NOCODE}() macros become entirely superfluous and
    can be dropped.  Update the exception_table to point straight at do_trap().
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/traps.c            | 91 +++++++++++++++++------------------------
 xen/arch/x86/x86_64/entry.S     | 18 ++++----
 xen/include/asm-x86/processor.h |  6 +++
 3 files changed, 52 insertions(+), 63 deletions(-)
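
For illustration only (not part of the patch): a minimal, standalone C sketch
of the idea the commit message describes, namely that whether an x86 exception
pushes an error code follows architecturally from the vector number, so a
single bitmap in the style of TRAP_HAVE_EC can replace a per-callsite
use_error_code flag. The X86_EXC_* constants and the helper below are
hypothetical names rather than Xen's; only the vector numbers themselves are
the architectural ones.

#include <stdio.h>

/*
 * Architectural x86 exception vectors which push an error code.
 * (Hypothetical X86_EXC_* names; the values are the architectural ones.)
 */
#define X86_EXC_DF  8   /* Double fault        */
#define X86_EXC_TS 10   /* Invalid TSS         */
#define X86_EXC_NP 11   /* Segment not present */
#define X86_EXC_SS 12   /* Stack-segment fault */
#define X86_EXC_GP 13   /* General protection  */
#define X86_EXC_PF 14   /* Page fault          */
#define X86_EXC_AC 17   /* Alignment check     */

/* Bitmap of vectors which have error codes, analogous to TRAP_HAVE_EC. */
#define EXC_HAVE_EC                                                     \
    ((1u << X86_EXC_DF) | (1u << X86_EXC_TS) | (1u << X86_EXC_NP) |     \
     (1u << X86_EXC_SS) | (1u << X86_EXC_GP) | (1u << X86_EXC_PF) |     \
     (1u << X86_EXC_AC))

/* Derive error-code presence from the vector alone; no extra parameter. */
static int vector_has_error_code(unsigned int vector)
{
    return vector < 32 && (EXC_HAVE_EC & (1u << vector)) != 0;
}

int main(void)
{
    unsigned int v;

    for ( v = 0; v < 32; v++ )
        printf("vector %2u: %s\n", v,
               vector_has_error_code(v) ? "error code" : "no error code");

    return 0;
}

Built with e.g. "gcc -Wall sketch.c", this prints which of the first 32
vectors carry an error code, analogous to the check do_guest_trap() performs
after this patch.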

diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b042976..c228b45 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -625,12 +625,17 @@ void fatal_trap(const struct cpu_user_regs *regs, bool_t show_remote)
           (regs->eflags & X86_EFLAGS_IF) ? "" : ", IN INTERRUPT CONTEXT");
 }
 
-static void do_guest_trap(
-    int trapnr, const struct cpu_user_regs *regs, int use_error_code)
+static void do_guest_trap(unsigned int trapnr,
+                          const struct cpu_user_regs *regs)
 {
     struct vcpu *v = current;
     struct trap_bounce *tb;
     const struct trap_info *ti;
+    bool_t use_error_code;
+
+    ASSERT(trapnr < 32);
+
+    use_error_code = (TRAP_HAVE_EC & (1u << trapnr));
 
     trace_pv_trap(trapnr, regs->eip, use_error_code, regs->error_code);
 
@@ -666,7 +671,7 @@ static void instruction_done(
         current->arch.debugreg[6] |= bpmatch | DR_STATUS_RESERVED_ONE;
         if ( regs->eflags & X86_EFLAGS_TF )
             current->arch.debugreg[6] |= DR_STEP;
-        do_guest_trap(TRAP_debug, regs, 0);
+        do_guest_trap(TRAP_debug, regs);
     }
 }
 
@@ -714,7 +719,7 @@ int set_guest_machinecheck_trapbounce(void)
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
  
-    do_guest_trap(TRAP_machine_check, guest_cpu_user_regs(), 0);
+    do_guest_trap(TRAP_machine_check, guest_cpu_user_regs());
     tb->flags &= ~TBF_EXCEPTION; /* not needed for MCE delivery path */
     return !null_trap_bounce(v, tb);
 }
@@ -727,7 +732,7 @@ int set_guest_nmi_trapbounce(void)
 {
     struct vcpu *v = current;
     struct trap_bounce *tb = &v->arch.pv_vcpu.trap_bounce;
-    do_guest_trap(TRAP_nmi, guest_cpu_user_regs(), 0);
+    do_guest_trap(TRAP_nmi, guest_cpu_user_regs());
     tb->flags &= ~TBF_EXCEPTION; /* not needed for NMI delivery path */
     return !null_trap_bounce(v, tb);
 }
@@ -743,7 +748,7 @@ void do_reserved_trap(struct cpu_user_regs *regs)
     panic("FATAL RESERVED TRAP %#x: %s", trapnr, trapstr(trapnr));
 }
 
-static void do_trap(struct cpu_user_regs *regs, int use_error_code)
+void do_trap(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
     unsigned int trapnr = regs->entry_vector;
@@ -757,7 +762,7 @@ static void do_trap(struct cpu_user_regs *regs, int use_error_code)
 
     if ( guest_mode(regs) )
     {
-        do_guest_trap(trapnr, regs, use_error_code);
+        do_guest_trap(trapnr, regs);
         return;
     }
 
@@ -789,28 +794,6 @@ static void do_trap(struct cpu_user_regs *regs, int use_error_code)
           trapnr, trapstr(trapnr), regs->error_code);
 }
 
-#define DO_ERROR_NOCODE(name)                           \
-void do_##name(struct cpu_user_regs *regs)              \
-{                                                       \
-    do_trap(regs, 0);                                   \
-}
-
-#define DO_ERROR(name)                                  \
-void do_##name(struct cpu_user_regs *regs)              \
-{                                                       \
-    do_trap(regs, 1);                                   \
-}
-
-DO_ERROR_NOCODE(divide_error)
-DO_ERROR_NOCODE(overflow)
-DO_ERROR_NOCODE(bounds)
-DO_ERROR(       invalid_TSS)
-DO_ERROR(       segment_not_present)
-DO_ERROR(       stack_segment)
-DO_ERROR_NOCODE(coprocessor_error)
-DO_ERROR(       alignment_check)
-DO_ERROR_NOCODE(simd_coprocessor_error)
-
 /* Returns 0 if not handled, and non-0 for success. */
 int rdmsr_hypervisor_regs(uint32_t idx, uint64_t *val)
 {
@@ -1318,7 +1301,7 @@ void do_invalid_op(struct cpu_user_regs *regs)
     {
         if ( !emulate_invalid_rdtscp(regs) &&
              !emulate_forced_invalid_op(regs) )
-            do_guest_trap(TRAP_invalid_op, regs, 0);
+            do_guest_trap(TRAP_invalid_op, regs);
         return;
     }
 
@@ -1432,7 +1415,7 @@ void do_int3(struct cpu_user_regs *regs)
         return;
     } 
 
-    do_guest_trap(TRAP_int3, regs, 0);
+    do_guest_trap(TRAP_int3, regs);
 }
 
 static void reserved_bit_page_fault(
@@ -2604,7 +2587,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             if ( lock || rep_prefix || opsize_prefix
                  || !(v->arch.pv_vcpu.ctrlreg[4] & X86_CR4_OSXSAVE) )
             {
-                do_guest_trap(TRAP_invalid_op, regs, 0);
+                do_guest_trap(TRAP_invalid_op, regs);
                 goto skip;
             }
 
@@ -3136,12 +3119,12 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
          (((ar >> 13) & 3) < (regs->cs & 3)) ||
          ((ar & _SEGMENT_TYPE) != 0xc00) )
     {
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
         return;
     }
     if ( !(ar & _SEGMENT_P) )
     {
-        do_guest_trap(TRAP_no_segment, regs, 1);
+        do_guest_trap(TRAP_no_segment, regs);
         return;
     }
     dpl = (ar >> 13) & 3;
@@ -3156,7 +3139,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
          !(ar & _SEGMENT_P) ||
          !(ar & _SEGMENT_CODE) )
     {
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
         return;
     }
 
@@ -3310,7 +3293,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
     if ( jump < 0 )
     {
  fail:
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
  skip:
         return;
     }
@@ -3321,7 +3304,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
          !(ar & _SEGMENT_P) ||
          ((ar & _SEGMENT_CODE) && !(ar & _SEGMENT_WR)) )
     {
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
         return;
     }
 
@@ -3332,7 +3315,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
     ASSERT((opnd_sel & ~3) == regs->error_code);
     if ( dpl < (opnd_sel & 3) )
     {
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
         return;
     }
 
@@ -3344,19 +3327,19 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
           ((ar >> 13) & 3) != (regs->cs & 3)) )
     {
         regs->error_code = sel;
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
         return;
     }
     if ( !(ar & _SEGMENT_P) )
     {
         regs->error_code = sel;
-        do_guest_trap(TRAP_no_segment, regs, 1);
+        do_guest_trap(TRAP_no_segment, regs);
         return;
     }
     if ( off > limit )
     {
         regs->error_code = 0;
-        do_guest_trap(TRAP_gp_fault, regs, 1);
+        do_guest_trap(TRAP_gp_fault, regs);
         return;
     }
 
@@ -3383,7 +3366,7 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
             /* Inner stack known only for kernel ring. */
             if ( (sel & 3) != GUEST_KERNEL_RPL(v->domain) )
             {
-                do_guest_trap(TRAP_gp_fault, regs, 1);
+                do_guest_trap(TRAP_gp_fault, regs);
                 return;
             }
             esp = v->arch.pv_vcpu.kernel_sp;
@@ -3396,20 +3379,20 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
                  !(ar & _SEGMENT_WR) )
             {
                 regs->error_code = ss & ~3;
-                do_guest_trap(TRAP_invalid_tss, regs, 1);
+                do_guest_trap(TRAP_invalid_tss, regs);
                 return;
             }
             if ( !(ar & _SEGMENT_P) ||
                  !check_stack_limit(ar, limit, esp, (4 + nparm) * 4) )
             {
                 regs->error_code = ss & ~3;
-                do_guest_trap(TRAP_stack_error, regs, 1);
+                do_guest_trap(TRAP_stack_error, regs);
                 return;
             }
             stkp = (unsigned int *)(unsigned long)((unsigned int)base + esp);
             if ( !compat_access_ok(stkp - 4 - nparm, (4 + nparm) * 4) )
             {
-                do_guest_trap(TRAP_gp_fault, regs, 1);
+                do_guest_trap(TRAP_gp_fault, regs);
                 return;
             }
             push(regs->ss);
@@ -3424,11 +3407,11 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
                      (ar & _SEGMENT_CODE) ||
                      !(ar & _SEGMENT_WR) ||
                     !check_stack_limit(ar, limit, esp + nparm * 4, nparm * 4) )
-                    return do_guest_trap(TRAP_gp_fault, regs, 1);
+                    return do_guest_trap(TRAP_gp_fault, regs);
                ustkp = (unsigned int *)(unsigned long)((unsigned int)base + regs->_esp + nparm * 4);
                 if ( !compat_access_ok(ustkp - nparm, nparm * 4) )
                 {
-                    do_guest_trap(TRAP_gp_fault, regs, 1);
+                    do_guest_trap(TRAP_gp_fault, regs);
                     return;
                 }
                 do
@@ -3454,19 +3437,19 @@ static void emulate_gate_op(struct cpu_user_regs *regs)
             if ( !read_descriptor(ss, v, regs, &base, &limit, &ar, 0) ||
                  ((ar >> 13) & 3) != (sel & 3) )
             {
-                do_guest_trap(TRAP_gp_fault, regs, 1);
+                do_guest_trap(TRAP_gp_fault, regs);
                 return;
             }
             if ( !check_stack_limit(ar, limit, esp, 2 * 4) )
             {
                 regs->error_code = 0;
-                do_guest_trap(TRAP_stack_error, regs, 1);
+                do_guest_trap(TRAP_stack_error, regs);
                 return;
             }
             stkp = (unsigned int *)(unsigned long)((unsigned int)base + esp);
             if ( !compat_access_ok(stkp - 2, 2 * 4) )
             {
-                do_guest_trap(TRAP_gp_fault, regs, 1);
+                do_guest_trap(TRAP_gp_fault, regs);
                 return;
             }
         }
@@ -3527,7 +3510,7 @@ void do_general_protection(struct cpu_user_regs *regs)
         if ( permit_softint(TI_GET_DPL(ti), v, regs) )
         {
             regs->eip += 2;
-            do_guest_trap(vector, regs, 0);
+            do_guest_trap(vector, regs);
             return;
         }
     }
@@ -3546,7 +3529,7 @@ void do_general_protection(struct cpu_user_regs *regs)
     }
 
     /* Pass on GPF as is. */
-    do_guest_trap(TRAP_gp_fault, regs, 1);
+    do_guest_trap(TRAP_gp_fault, regs);
     return;
 
  gp_in_kernel:
@@ -3762,7 +3745,7 @@ void do_device_not_available(struct cpu_user_regs *regs)
 
     if ( curr->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS )
     {
-        do_guest_trap(TRAP_no_device, regs, 0);
+        do_guest_trap(TRAP_no_device, regs);
         curr->arch.pv_vcpu.ctrlreg[0] &= ~X86_CR0_TS;
     }
     else
@@ -3835,7 +3818,7 @@ void do_debug(struct cpu_user_regs *regs)
     v->arch.debugreg[6] = read_debugreg(6);
 
     ler_enable();
-    do_guest_trap(TRAP_debug, regs, 0);
+    do_guest_trap(TRAP_debug, regs);
     return;
 
  out:
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index f7178cd..64d260a 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -715,26 +715,26 @@ GLOBAL(trap_nop)
 .section .rodata, "a", @progbits
 
 ENTRY(exception_table)
-        .quad do_divide_error
+        .quad do_trap
         .quad do_debug
         .quad do_nmi
         .quad do_int3
-        .quad do_overflow
-        .quad do_bounds
+        .quad do_trap
+        .quad do_trap
         .quad do_invalid_op
         .quad do_device_not_available
         .quad do_reserved_trap /* double_fault - has its own entry. */
         .quad do_reserved_trap /* coproc_seg_overrun - Intel 387 only. */
-        .quad do_invalid_TSS
-        .quad do_segment_not_present
-        .quad do_stack_segment
+        .quad do_trap
+        .quad do_trap
+        .quad do_trap
         .quad do_general_protection
         .quad do_page_fault
         .quad do_reserved_trap /* Default PIC spurious irq - architecturally reserved. */
-        .quad do_coprocessor_error
-        .quad do_alignment_check
+        .quad do_trap
+        .quad do_trap
         .quad do_machine_check
-        .quad do_simd_coprocessor_error
+        .quad do_trap
         .rept TRAP_nr - ((. - exception_table) / 8)
         .quad do_reserved_trap /* Architecturally reserved exceptions. */
         .endr
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index c69d7a4..487ae28 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -117,6 +117,12 @@
 #define TRAP_virtualisation   20
 #define TRAP_nr               32
 
+#define TRAP_HAVE_EC                                                    \
+    ((1u << TRAP_double_fault) | (1u << TRAP_invalid_tss) |             \
+     (1u << TRAP_no_segment) | (1u << TRAP_stack_error) |               \
+     (1u << TRAP_gp_fault) | (1u << TRAP_page_fault) |                  \
+     (1u << TRAP_alignment_check))
+
 /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
 /* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
 #define TRAP_syscall         256
--
generated by git-patchbot for /home/xen/git/xen.git#master
