
[Xen-changelog] [xen staging] x86/shadow: ditch dangerous declarations



commit 93a8877544c3df39124eea6e1f12a319aa3ce54a
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Jul 22 11:30:10 2019 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Jul 22 11:30:10 2019 +0200

    x86/shadow: ditch dangerous declarations
    
    This started out with me noticing the latent bug of HVM-related
    declarations living in common.c, where their producer doesn't see them
    and where they hence could go out of sync at any time. However, go
    further than fixing just that and move the functions actually using
    them into hvm.c. This way the items in question can simply become
    static, and no separate declarations are needed at all.
    
    Within the moved code, constify and rename (or outright delete) the
    struct vcpu * local variables, and re-format a comment.
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c  | 99 ----------------------------------------
 xen/arch/x86/mm/shadow/hvm.c     | 82 +++++++++++++++++++++++++++++++--
 xen/arch/x86/mm/shadow/private.h | 15 ++++++
 3 files changed, 94 insertions(+), 102 deletions(-)
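
The "dangerous declarations" of the title are externs repeated in a consumer
.c file (here: common.c) instead of living in a header that the producer also
includes. A minimal sketch of the hazard, using hypothetical file and function
names rather than the actual Xen ones:

    /* producer.c -- defines the function. */
    int translate_addr(unsigned long offset, unsigned long *linear)
    {
        *linear = offset;           /* details don't matter here */
        return 0;
    }

    /* consumer.c -- repeats the declaration locally instead of
     * including a shared header. */
    extern int translate_addr(unsigned long offset, unsigned long *linear);

If producer.c later gains an extra parameter, neither the compiler nor the
linker flags the stale prototype in consumer.c: C symbols carry no type
information, so the call keeps building and silently passes the wrong
arguments. Had the declaration lived in a header included by both files, the
mismatch would have been a compile error in producer.c. Moving the functions
into hvm.c goes one step better still: the items become static and need no
external declaration at all.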

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 795201dc82..fa18de0bb6 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -112,105 +112,6 @@ static int __init shadow_audit_key_init(void)
 __initcall(shadow_audit_key_init);
 #endif /* SHADOW_AUDIT */
 
-
-#ifdef CONFIG_HVM
-extern const struct x86_emulate_ops hvm_shadow_emulator_ops;
-extern struct segment_register *hvm_get_seg_reg(
-    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
-extern int hvm_translate_virtual_addr(
-    enum x86_segment seg,
-    unsigned long offset,
-    unsigned int bytes,
-    enum hvm_access_type access_type,
-    struct sh_emulate_ctxt *sh_ctxt,
-    unsigned long *linear);
-#endif
-
-const struct x86_emulate_ops *shadow_init_emulation(
-    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
-    unsigned int pte_size)
-{
-#ifdef CONFIG_HVM
-    struct segment_register *creg, *sreg;
-    struct vcpu *v = current;
-    unsigned long addr;
-
-    ASSERT(is_hvm_vcpu(v));
-
-    memset(sh_ctxt, 0, sizeof(*sh_ctxt));
-
-    sh_ctxt->ctxt.regs = regs;
-    sh_ctxt->ctxt.cpuid = v->domain->arch.cpuid;
-    sh_ctxt->ctxt.lma = hvm_long_mode_active(v);
-
-    /* Segment cache initialisation. Primed with CS. */
-    creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
-
-    /* Work out the emulation mode. */
-    if ( sh_ctxt->ctxt.lma && creg->l )
-        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
-    else
-    {
-        sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
-        sh_ctxt->ctxt.addr_size = creg->db ? 32 : 16;
-        sh_ctxt->ctxt.sp_size   = sreg->db ? 32 : 16;
-    }
-
-    sh_ctxt->pte_size = pte_size;
-
-    /* Attempt to prefetch whole instruction. */
-    sh_ctxt->insn_buf_eip = regs->rip;
-    sh_ctxt->insn_buf_bytes =
-        (!hvm_translate_virtual_addr(
-            x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
-            hvm_access_insn_fetch, sh_ctxt, &addr) &&
-         !hvm_copy_from_guest_linear(
-             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
-             PFEC_insn_fetch, NULL))
-        ? sizeof(sh_ctxt->insn_buf) : 0;
-
-    return &hvm_shadow_emulator_ops;
-#else
-    BUG();
-    return NULL;
-#endif
-}
-
-/* Update an initialized emulation context to prepare for the next
- * instruction */
-void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
-                               struct cpu_user_regs *regs)
-{
-#ifdef CONFIG_HVM
-    struct vcpu *v = current;
-    unsigned long addr, diff;
-
-    ASSERT(is_hvm_vcpu(v));
-
-    /*
-     * We don't refetch the segment bases, because we don't emulate
-     * writes to segment registers
-     */
-    diff = regs->rip - sh_ctxt->insn_buf_eip;
-    if ( diff > sh_ctxt->insn_buf_bytes )
-    {
-        /* Prefetch more bytes. */
-        sh_ctxt->insn_buf_bytes =
-            (!hvm_translate_virtual_addr(
-                x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
-                hvm_access_insn_fetch, sh_ctxt, &addr) &&
-             !hvm_copy_from_guest_linear(
-                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
-                 PFEC_insn_fetch, NULL))
-            ? sizeof(sh_ctxt->insn_buf) : 0;
-        sh_ctxt->insn_buf_eip = regs->rip;
-    }
-#else
-    BUG();
-#endif
-}
-
-
 #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
 /**************************************************************************/
 /* Out-of-sync shadows. */
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index c6469c846c..0aa560b7f5 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -54,7 +54,7 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
  * Callers which pass a known in-range x86_segment can rely on the return
  * pointer being valid.  Other callers must explicitly check for errors.
  */
-struct segment_register *hvm_get_seg_reg(
+static struct segment_register *hvm_get_seg_reg(
     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
 {
     unsigned int idx = seg;
@@ -69,7 +69,7 @@ struct segment_register *hvm_get_seg_reg(
     return seg_reg;
 }
 
-int hvm_translate_virtual_addr(
+static int hvm_translate_virtual_addr(
     enum x86_segment seg,
     unsigned long offset,
     unsigned int bytes,
@@ -292,13 +292,89 @@ hvm_emulate_cmpxchg(enum x86_segment seg,
     return rc;
 }
 
-const struct x86_emulate_ops hvm_shadow_emulator_ops = {
+static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
     .read       = hvm_emulate_read,
     .insn_fetch = hvm_emulate_insn_fetch,
     .write      = hvm_emulate_write,
     .cmpxchg    = hvm_emulate_cmpxchg,
 };
 
+const struct x86_emulate_ops *shadow_init_emulation(
+    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
+    unsigned int pte_size)
+{
+    struct segment_register *creg, *sreg;
+    const struct vcpu *curr = current;
+    unsigned long addr;
+
+    ASSERT(is_hvm_vcpu(curr));
+
+    memset(sh_ctxt, 0, sizeof(*sh_ctxt));
+
+    sh_ctxt->ctxt.regs = regs;
+    sh_ctxt->ctxt.cpuid = curr->domain->arch.cpuid;
+    sh_ctxt->ctxt.lma = hvm_long_mode_active(curr);
+
+    /* Segment cache initialisation. Primed with CS. */
+    creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
+
+    /* Work out the emulation mode. */
+    if ( sh_ctxt->ctxt.lma && creg->l )
+        sh_ctxt->ctxt.addr_size = sh_ctxt->ctxt.sp_size = 64;
+    else
+    {
+        sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+        sh_ctxt->ctxt.addr_size = creg->db ? 32 : 16;
+        sh_ctxt->ctxt.sp_size   = sreg->db ? 32 : 16;
+    }
+
+    sh_ctxt->pte_size = pte_size;
+
+    /* Attempt to prefetch whole instruction. */
+    sh_ctxt->insn_buf_eip = regs->rip;
+    sh_ctxt->insn_buf_bytes =
+        (!hvm_translate_virtual_addr(
+            x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
+            hvm_access_insn_fetch, sh_ctxt, &addr) &&
+         !hvm_copy_from_guest_linear(
+             sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+             PFEC_insn_fetch, NULL))
+        ? sizeof(sh_ctxt->insn_buf) : 0;
+
+    return &hvm_shadow_emulator_ops;
+}
+
+/*
+ * Update an initialized emulation context to prepare for the next
+ * instruction.
+ */
+void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
+                               struct cpu_user_regs *regs)
+{
+    unsigned long addr, diff;
+
+    ASSERT(is_hvm_vcpu(current));
+
+    /*
+     * We don't refetch the segment bases, because we don't emulate
+     * writes to segment registers
+     */
+    diff = regs->rip - sh_ctxt->insn_buf_eip;
+    if ( diff > sh_ctxt->insn_buf_bytes )
+    {
+        /* Prefetch more bytes. */
+        sh_ctxt->insn_buf_bytes =
+            (!hvm_translate_virtual_addr(
+                x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
+                hvm_access_insn_fetch, sh_ctxt, &addr) &&
+             !hvm_copy_from_guest_linear(
+                 sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+                 PFEC_insn_fetch, NULL))
+            ? sizeof(sh_ctxt->insn_buf) : 0;
+        sh_ctxt->insn_buf_eip = regs->rip;
+    }
+}
+
 /**************************************************************************/
 /* Handling guest writes to pagetables. */
 
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 3251f34ba0..3217777921 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -709,11 +709,26 @@ struct sh_emulate_ctxt {
 #endif
 };
 
+#ifdef CONFIG_HVM
 const struct x86_emulate_ops *shadow_init_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
     unsigned int pte_size);
 void shadow_continue_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
+#else
+static inline const struct x86_emulate_ops *shadow_init_emulation(
+    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
+    unsigned int pte_size)
+{
+    BUG();
+    return NULL;
+}
+static inline void shadow_continue_emulation(
+    struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs)
+{
+    BUG();
+}
+#endif
 
 /* Stop counting towards early unshadows, as we've seen a real page fault */
 static inline void sh_reset_early_unshadow(struct vcpu *v)
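
The private.h hunk above uses a common Xen idiom: when CONFIG_HVM is off, the
real declarations are replaced by unreachable static inline stubs, so call
sites compile unchanged in both configurations and need no #ifdef of their
own. A hypothetical caller, sketching the pattern (the wrapper function and
its x86_emulate() invocation are illustrative, not lifted from the tree):

    /* Sketch of a call site that builds with and without CONFIG_HVM.
     * In a !CONFIG_HVM build this path must never be reached; the
     * stub's BUG() turns any violation of that invariant into a loud
     * crash rather than a NULL-ops dereference. */
    static int emulate_one_insn(struct sh_emulate_ctxt *sh_ctxt,
                                struct cpu_user_regs *regs)
    {
        const struct x86_emulate_ops *ops =
            shadow_init_emulation(sh_ctxt, regs, sizeof(l1_pgentry_t));

        return x86_emulate(&sh_ctxt->ctxt, ops);
    }
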
--
generated by git-patchbot for /home/xen/git/xen.git#staging
