
[xen staging] x86/pv: Move x86/trace.c to x86/pv/trace.c

commit 8aae4f376ca59174a25938e95dc41e9883ee675c
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Sep 20 15:02:32 2021 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Sep 21 19:41:12 2021 +0100

    x86/pv: Move x86/trace.c to x86/pv/trace.c
    
    This entire file is pv-only, and not excluded from the build by
    CONFIG_TRACEBUFFER.  Move it into the pv/ directory, build it conditionally,
    and drop unused includes.
    
    Also move the contents of asm/trace.h to asm/pv/trace.h to avoid the functions
    being declared across the entire hypervisor.
    
    One caller in fixup_page_fault() is effectively PV only, but is not subject to
    dead code elimination.  Add an additional IS_ENABLED(CONFIG_PV) to keep the
    build happy.
    
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/arch/x86/Makefile           |   1 -
 xen/arch/x86/pv/Makefile        |   1 +
 xen/arch/x86/pv/emul-inv-op.c   |   2 +-
 xen/arch/x86/pv/emul-priv-op.c  |   1 +
 xen/arch/x86/pv/ro-page-fault.c |   2 +-
 xen/arch/x86/pv/trace.c         | 156 +++++++++++++++++++++++++++++++++++++++
 xen/arch/x86/pv/traps.c         |   2 +-
 xen/arch/x86/trace.c            | 159 ----------------------------------------
 xen/arch/x86/traps.c            |   3 +-
 xen/include/asm-x86/pv/trace.h  |  48 ++++++++++++
 xen/include/asm-x86/trace.h     |  42 -----------
 11 files changed, 211 insertions(+), 206 deletions(-)
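
As background on the IS_ENABLED(CONFIG_PV) change in fixup_page_fault(): the
trace_trap_two_addr() wrapper is still declared via asm/pv/trace.h in a !PV
build, but its out-of-line helper is only compiled when pv/trace.o is built,
so the call must be provably dead for the link to succeed.  Below is a
minimal, self-contained sketch of that idiom; the names (pv_only_helper(),
fixup()) are hypothetical, and CONFIG_PV is stubbed with a plain 0/1 macro
rather than Xen's real Kconfig/IS_ENABLED() plumbing.

/*
 * Minimal sketch of the IS_ENABLED()-style dead code elimination idiom.
 * Hypothetical names throughout; CONFIG_PV is faked with a plain 0/1
 * macro instead of the Kconfig-generated definitions Xen actually uses.
 * Build with optimisation (e.g. gcc -O2) and flip CONFIG_PV to 0: the
 * guarded call is folded away, so the missing definition of
 * pv_only_helper() never reaches the linker.
 */
#include <stdio.h>

#define CONFIG_PV 1                   /* set to 0 to "compile PV out" */
#define IS_ENABLED(option) (option)   /* stand-in for the real macro  */

/* Declared unconditionally, as a header normally would... */
void pv_only_helper(unsigned long addr);

#if CONFIG_PV
/* ...but only defined when the feature is part of the build. */
void pv_only_helper(unsigned long addr)
{
    printf("PV-only fixup traced at %#lx\n", addr);
}
#endif

static int fixup(unsigned long addr, int fixed)
{
    /*
     * With CONFIG_PV == 0 the condition is constant false, the compiler
     * drops the call, and no definition of pv_only_helper() is needed.
     * Without the IS_ENABLED() guard this would fail to link whenever
     * the helper's object file is not built.
     */
    if ( IS_ENABLED(CONFIG_PV) && fixed )
        pv_only_helper(addr);

    return fixed;
}

int main(void)
{
    return fixup(0xdeadbeefUL, 1) ? 0 : 1;
}

This constant folding is what the commit message refers to as dead code
elimination for the PV-only trace call guarded in the hunk for
xen/arch/x86/traps.c below.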

diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile
index 202d4d2782..82dd4e18bd 100644
--- a/xen/arch/x86/Makefile
+++ b/xen/arch/x86/Makefile
@@ -61,7 +61,6 @@ obj-y += spec_ctrl.o
 obj-y += srat.o
 obj-y += string.o
 obj-y += time.o
-obj-y += trace.o
 obj-y += traps.o
 obj-y += tsx.o
 obj-y += usercopy.o
diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
index 75b01b0062..6cda354cc4 100644
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -12,6 +12,7 @@ obj-y += misc-hypercalls.o
 obj-y += mm.o
 obj-y += ro-page-fault.o
 obj-$(CONFIG_PV_SHIM) += shim.o
+obj-$(CONFIG_TRACEBUFFER) += trace.o
 obj-y += traps.o
 
 obj-bin-y += dom0_build.init.o
diff --git a/xen/arch/x86/pv/emul-inv-op.c b/xen/arch/x86/pv/emul-inv-op.c
index b15f302bca..2c07eed9a0 100644
--- a/xen/arch/x86/pv/emul-inv-op.c
+++ b/xen/arch/x86/pv/emul-inv-op.c
@@ -19,7 +19,7 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <xen/trace.h>
+#include <asm/pv/trace.h>
 
 #include "emulate.h"
 
diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c
index 11467a1e3a..d0df5bc616 100644
--- a/xen/arch/x86/pv/emul-priv-op.c
+++ b/xen/arch/x86/pv/emul-priv-op.c
@@ -30,6 +30,7 @@
 #include <asm/hypercall.h>
 #include <asm/mc146818rtc.h>
 #include <asm/pv/domain.h>
+#include <asm/pv/trace.h>
 #include <asm/shared.h>
 
 #include <xsm/xsm.h>
diff --git a/xen/arch/x86/pv/ro-page-fault.c b/xen/arch/x86/pv/ro-page-fault.c
index 335aa8dc5d..ac5b66870c 100644
--- a/xen/arch/x86/pv/ro-page-fault.c
+++ b/xen/arch/x86/pv/ro-page-fault.c
@@ -20,7 +20,7 @@
  * along with this program; If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <xen/trace.h>
+#include <asm/pv/trace.h>
 #include <asm/shadow.h>
 
 #include "emulate.h"
diff --git a/xen/arch/x86/pv/trace.c b/xen/arch/x86/pv/trace.c
new file mode 100644
index 0000000000..550c22765b
--- /dev/null
+++ b/xen/arch/x86/pv/trace.c
@@ -0,0 +1,156 @@
+#include <xen/sched.h>
+
+#include <asm/pv/trace.h>
+
+void __trace_pv_trap(int trapnr, unsigned long eip,
+                     int use_error_code, unsigned error_code)
+{
+    if ( is_pv_32bit_vcpu(current) )
+    {
+        struct __packed {
+            unsigned eip:32,
+                trapnr:15,
+                use_error_code:1,
+                error_code:16;
+        } d;
+
+        d.eip = eip;
+        d.trapnr = trapnr;
+        d.error_code = error_code;
+        d.use_error_code=!!use_error_code;
+
+        __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
+    }
+    else
+    {
+        struct __packed {
+            unsigned long eip;
+            unsigned trapnr:15,
+                use_error_code:1,
+                error_code:16;
+        } d;
+        unsigned event;
+
+        d.eip = eip;
+        d.trapnr = trapnr;
+        d.error_code = error_code;
+        d.use_error_code=!!use_error_code;
+
+        event = TRC_PV_TRAP;
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+}
+
+void __trace_pv_page_fault(unsigned long addr, unsigned error_code)
+{
+    unsigned long eip = guest_cpu_user_regs()->rip;
+
+    if ( is_pv_32bit_vcpu(current) )
+    {
+        struct __packed {
+            u32 eip, addr, error_code;
+        } d;
+
+        d.eip = eip;
+        d.addr = addr;
+        d.error_code = error_code;
+
+        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
+    }
+    else
+    {
+        struct __packed {
+            unsigned long eip, addr;
+            u32 error_code;
+        } d;
+        unsigned event;
+
+        d.eip = eip;
+        d.addr = addr;
+        d.error_code = error_code;
+        event = TRC_PV_PAGE_FAULT;
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+}
+
+void __trace_trap_one_addr(unsigned event, unsigned long va)
+{
+    if ( is_pv_32bit_vcpu(current) )
+    {
+        u32 d = va;
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+    else
+    {
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1, sizeof(va), &va);
+    }
+}
+
+void __trace_trap_two_addr(unsigned event, unsigned long va1,
+                           unsigned long va2)
+{
+    if ( is_pv_32bit_vcpu(current) )
+    {
+        struct __packed {
+            u32 va1, va2;
+        } d;
+        d.va1=va1;
+        d.va2=va2;
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+    else
+    {
+        struct __packed {
+            unsigned long va1, va2;
+        } d;
+        d.va1=va1;
+        d.va2=va2;
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1, sizeof(d), &d);
+    }
+}
+
+void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
+{
+    unsigned long eip = guest_cpu_user_regs()->rip;
+
+    /* We have a couple of different modes to worry about:
+     * - 32-on-32: 32-bit pte, 32-bit virtual addresses
+     * - pae-on-pae, pae-on-64: 64-bit pte, 32-bit virtual addresses
+     * - 64-on-64: 64-bit pte, 64-bit virtual addresses
+     * pae-on-64 is the only one that requires extra code; in all other
+     * cases, "unsigned long" is the size of a guest virtual address.
+     */
+
+    if ( is_pv_32bit_vcpu(current) )
+    {
+        struct __packed {
+            l1_pgentry_t pte;
+            u32 addr, eip;
+        } d;
+        d.addr = addr;
+        d.eip = eip;
+        d.pte = npte;
+
+        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
+    }
+    else
+    {
+        struct {
+            l1_pgentry_t pte;
+            unsigned long addr, eip;
+        } d;
+        unsigned event;
+
+        d.addr = addr;
+        d.eip = eip;
+        d.pte = npte;
+
+        event = TRC_PV_PTWR_EMULATION;
+        event |= TRC_64_FLAG;
+        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
+    }
+}
diff --git a/xen/arch/x86/pv/traps.c b/xen/arch/x86/pv/traps.c
index 7439b76df8..764773c021 100644
--- a/xen/arch/x86/pv/traps.c
+++ b/xen/arch/x86/pv/traps.c
@@ -22,10 +22,10 @@
 #include <xen/event.h>
 #include <xen/hypercall.h>
 #include <xen/lib.h>
-#include <xen/trace.h>
 #include <xen/softirq.h>
 
 #include <asm/apic.h>
+#include <asm/pv/trace.h>
 #include <asm/shared.h>
 #include <asm/traps.h>
 #include <irq_vectors.h>
diff --git a/xen/arch/x86/trace.c b/xen/arch/x86/trace.c
deleted file mode 100644
index 4a953c5b2f..0000000000
--- a/xen/arch/x86/trace.c
+++ /dev/null
@@ -1,159 +0,0 @@
-#include <xen/init.h>
-#include <xen/kernel.h>
-#include <xen/lib.h>
-#include <xen/domain.h>
-#include <xen/sched.h>
-#include <xen/trace.h>
-
-void __trace_pv_trap(int trapnr, unsigned long eip,
-                     int use_error_code, unsigned error_code)
-{
-    if ( is_pv_32bit_vcpu(current) )
-    {
-        struct __packed {
-            unsigned eip:32,
-                trapnr:15,
-                use_error_code:1,
-                error_code:16;
-        } d;
-
-        d.eip = eip;
-        d.trapnr = trapnr;
-        d.error_code = error_code;
-        d.use_error_code=!!use_error_code;
-                
-        __trace_var(TRC_PV_TRAP, 1, sizeof(d), &d);
-    }
-    else
-    {
-        struct __packed {
-            unsigned long eip;
-            unsigned trapnr:15,
-                use_error_code:1,
-                error_code:16;
-        } d;
-        unsigned event;
-
-        d.eip = eip;
-        d.trapnr = trapnr;
-        d.error_code = error_code;
-        d.use_error_code=!!use_error_code;
-                
-        event = TRC_PV_TRAP;
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), &d);
-    }
-}
-
-void __trace_pv_page_fault(unsigned long addr, unsigned error_code)
-{
-    unsigned long eip = guest_cpu_user_regs()->rip;
-
-    if ( is_pv_32bit_vcpu(current) )
-    {
-        struct __packed {
-            u32 eip, addr, error_code;
-        } d;
-
-        d.eip = eip;
-        d.addr = addr;
-        d.error_code = error_code;
-                
-        __trace_var(TRC_PV_PAGE_FAULT, 1, sizeof(d), &d);
-    }
-    else
-    {
-        struct __packed {
-            unsigned long eip, addr;
-            u32 error_code;
-        } d;
-        unsigned event;
-
-        d.eip = eip;
-        d.addr = addr;
-        d.error_code = error_code;
-        event = TRC_PV_PAGE_FAULT;
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), &d);
-    }
-}
-
-void __trace_trap_one_addr(unsigned event, unsigned long va)
-{
-    if ( is_pv_32bit_vcpu(current) )
-    {
-        u32 d = va;
-        __trace_var(event, 1, sizeof(d), &d);
-    }
-    else
-    {
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(va), &va);
-    }
-}
-
-void __trace_trap_two_addr(unsigned event, unsigned long va1,
-                           unsigned long va2)
-{
-    if ( is_pv_32bit_vcpu(current) )
-    {
-        struct __packed {
-            u32 va1, va2;
-        } d;
-        d.va1=va1;
-        d.va2=va2;
-        __trace_var(event, 1, sizeof(d), &d);
-    }
-    else
-    {
-        struct __packed {
-            unsigned long va1, va2;
-        } d;
-        d.va1=va1;
-        d.va2=va2;
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1, sizeof(d), &d);
-    }
-}
-
-void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
-{
-    unsigned long eip = guest_cpu_user_regs()->rip;
-
-    /* We have a couple of different modes to worry about:
-     * - 32-on-32: 32-bit pte, 32-bit virtual addresses
-     * - pae-on-pae, pae-on-64: 64-bit pte, 32-bit virtual addresses
-     * - 64-on-64: 64-bit pte, 64-bit virtual addresses
-     * pae-on-64 is the only one that requires extra code; in all other
-     * cases, "unsigned long" is the size of a guest virtual address.
-     */
-
-    if ( is_pv_32bit_vcpu(current) )
-    {
-        struct __packed {
-            l1_pgentry_t pte;
-            u32 addr, eip;
-        } d;
-        d.addr = addr;
-        d.eip = eip;
-        d.pte = npte;
-
-        __trace_var(TRC_PV_PTWR_EMULATION_PAE, 1, sizeof(d), &d);
-    }
-    else
-    {
-        struct {
-            l1_pgentry_t pte;
-            unsigned long addr, eip;
-        } d;
-        unsigned event;
-
-        d.addr = addr;
-        d.eip = eip;
-        d.pte = npte;
-
-        event = TRC_PV_PTWR_EMULATION;
-        event |= TRC_64_FLAG;
-        __trace_var(event, 1/*tsc*/, sizeof(d), &d);
-    }
-}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4a0e498b4c..0cc1ee95cb 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -82,6 +82,7 @@
 #include <xsm/xsm.h>
 #include <asm/mach-default/irq_vectors.h>
 #include <asm/pv/traps.h>
+#include <asm/pv/trace.h>
 #include <asm/pv/mm.h>
 
 /*
@@ -1480,7 +1481,7 @@ static int fixup_page_fault(unsigned long addr, struct cpu_user_regs *regs)
     {
         int ret = paging_fault(addr, regs);
 
-        if ( ret == EXCRET_fault_fixed )
+        if ( IS_ENABLED(CONFIG_PV) && ret == EXCRET_fault_fixed )
             trace_trap_two_addr(TRC_PV_PAGING_FIXUP, regs->rip, addr);
         return ret;
     }
diff --git a/xen/include/asm-x86/pv/trace.h b/xen/include/asm-x86/pv/trace.h
new file mode 100644
index 0000000000..c616206eeb
--- /dev/null
+++ b/xen/include/asm-x86/pv/trace.h
@@ -0,0 +1,48 @@
+#ifndef XEN_X86_PV_TRACE_H
+#define XEN_X86_PV_TRACE_H
+
+#include <xen/trace.h>
+
+#include <asm/page.h>
+
+void __trace_pv_trap(int trapnr, unsigned long eip,
+                     int use_error_code, unsigned error_code);
+static inline void trace_pv_trap(int trapnr, unsigned long eip,
+                                 int use_error_code, unsigned error_code)
+{
+    if ( unlikely(tb_init_done) )
+        __trace_pv_trap(trapnr, eip, use_error_code, error_code);
+}
+
+void __trace_pv_page_fault(unsigned long addr, unsigned error_code);
+static inline void trace_pv_page_fault(unsigned long addr,
+                                       unsigned error_code)
+{
+    if ( unlikely(tb_init_done) )
+        __trace_pv_page_fault(addr, error_code);
+}
+
+void __trace_trap_one_addr(unsigned event, unsigned long va);
+static inline void trace_trap_one_addr(unsigned event, unsigned long va)
+{
+    if ( unlikely(tb_init_done) )
+        __trace_trap_one_addr(event, va);
+}
+
+void __trace_trap_two_addr(unsigned event, unsigned long va1,
+                           unsigned long va2);
+static inline void trace_trap_two_addr(unsigned event, unsigned long va1,
+                                       unsigned long va2)
+{
+    if ( unlikely(tb_init_done) )
+        __trace_trap_two_addr(event, va1, va2);
+}
+
+void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte);
+static inline void trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
+{
+    if ( unlikely(tb_init_done) )
+        __trace_ptwr_emulation(addr, npte);
+}
+
+#endif /* XEN_X86_PV_TRACE_H */
diff --git a/xen/include/asm-x86/trace.h b/xen/include/asm-x86/trace.h
index e65b5de6ee..edef1bb099 100644
--- a/xen/include/asm-x86/trace.h
+++ b/xen/include/asm-x86/trace.h
@@ -1,46 +1,4 @@
 #ifndef __ASM_TRACE_H__
 #define __ASM_TRACE_H__
 
-#include <asm/page.h>
-
-void __trace_pv_trap(int trapnr, unsigned long eip,
-                     int use_error_code, unsigned error_code);
-static inline void trace_pv_trap(int trapnr, unsigned long eip,
-                                 int use_error_code, unsigned error_code)
-{
-    if ( unlikely(tb_init_done) )
-        __trace_pv_trap(trapnr, eip, use_error_code, error_code);
-}
-
-void __trace_pv_page_fault(unsigned long addr, unsigned error_code);
-static inline void trace_pv_page_fault(unsigned long addr,
-                                       unsigned error_code)
-{
-    if ( unlikely(tb_init_done) )
-        __trace_pv_page_fault(addr, error_code);
-}
-
-void __trace_trap_one_addr(unsigned event, unsigned long va);
-static inline void trace_trap_one_addr(unsigned event, unsigned long va)
-{
-    if ( unlikely(tb_init_done) )
-        __trace_trap_one_addr(event, va);
-}
-
-void __trace_trap_two_addr(unsigned event, unsigned long va1,
-                           unsigned long va2);
-static inline void trace_trap_two_addr(unsigned event, unsigned long va1,
-                                       unsigned long va2)
-{
-    if ( unlikely(tb_init_done) )
-        __trace_trap_two_addr(event, va1, va2);
-}
-
-void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte);
-static inline void trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte)
-{
-    if ( unlikely(tb_init_done) )
-        __trace_ptwr_emulation(addr, npte);
-}
-
 #endif /* __ASM_TRACE_H__ */
--
generated by git-patchbot for /home/xen/git/xen.git#staging