[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen staging] x86/mm/shadow: make it build with !CONFIG_HVM



commit 29ac2ad612e20904d0e8914e635386b7abe0f656
Author:     Wei Liu <wei.liu2@xxxxxxxxxx>
AuthorDate: Thu Aug 16 11:05:34 2018 +0100
Commit:     Wei Liu <wei.liu2@xxxxxxxxxx>
CommitDate: Sun Aug 26 11:01:03 2018 +0100

    x86/mm/shadow: make it build with !CONFIG_HVM
    
    Enclose HVM only emulation code under CONFIG_HVM. Add some BUG()s to
    catch any issue.
    
    Note that although some code checks is_hvm_*, which hints it can be
    called for PV as well, I can't find such paths.
    
    Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c | 18 ++++++++++++++++--
 xen/arch/x86/mm/shadow/multi.c  | 30 ++++++++++++++++++++----------
 2 files changed, 36 insertions(+), 12 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index fd42d734e7..b834921f46 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -113,6 +113,7 @@ __initcall(shadow_audit_key_init);
 #endif /* SHADOW_AUDIT */
 
 
+#ifdef CONFIG_HVM
 /**************************************************************************/
 /* x86 emulator support for the shadow code
  */
@@ -380,11 +381,13 @@ static const struct x86_emulate_ops 
hvm_shadow_emulator_ops = {
     .cmpxchg    = hvm_emulate_cmpxchg,
     .cpuid      = hvmemul_cpuid,
 };
+#endif
 
 const struct x86_emulate_ops *shadow_init_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
     unsigned int pte_size)
 {
+#ifdef CONFIG_HVM
     struct segment_register *creg, *sreg;
     struct vcpu *v = current;
     unsigned long addr;
@@ -423,6 +426,10 @@ const struct x86_emulate_ops *shadow_init_emulation(
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
+#else
+    BUG();
+    return NULL;
+#endif
 }
 
 /* Update an initialized emulation context to prepare for the next
@@ -430,6 +437,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
 void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
                                struct cpu_user_regs *regs)
 {
+#ifdef CONFIG_HVM
     struct vcpu *v = current;
     unsigned long addr, diff;
 
@@ -452,6 +460,9 @@ void shadow_continue_emulation(struct sh_emulate_ctxt 
*sh_ctxt,
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->rip;
     }
+#else
+    BUG();
+#endif
 }
 
 
@@ -1686,6 +1697,7 @@ static unsigned int shadow_get_allocation(struct domain 
*d)
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
 }
 
+#ifdef CONFIG_HVM
 /**************************************************************************/
 /* Handling guest writes to pagetables. */
 
@@ -1958,6 +1970,7 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void 
*addr,
 
     atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
 }
+#endif
 
 /**************************************************************************/
 /* Hash table for storing the guest->shadow mappings.
@@ -2724,12 +2737,13 @@ static int sh_remove_all_mappings(struct domain *d, 
mfn_t gmfn, gfn_t gfn)
                && (page->count_info & PGC_count_mask) <= 3
                && ((page->u.inuse.type_info & PGT_count_mask)
                    == (is_xen_heap_page(page) ||
-                       is_ioreq_server_page(d, page)))) )
+                       (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
         {
             SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
                           "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn_x(gfn),
                           page->count_info, page->u.inuse.type_info,
-                          !!is_xen_heap_page(page), is_ioreq_server_page(d, 
page));
+                          !!is_xen_heap_page(page),
+                          is_hvm_domain(d) && is_ioreq_server_page(d, page));
         }
     }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 021ae252e4..9e43533f69 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2924,20 +2924,22 @@ static int sh_page_fault(struct vcpu *v,
                 trace_shadow_gen(TRC_SHADOW_FAST_PROPAGATE, va);
                 return 0;
             }
-            else
-            {
-                /* Magic MMIO marker: extract gfn for MMIO address */
-                ASSERT(sh_l1e_is_mmio(sl1e));
-                gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
-                       << PAGE_SHIFT)
-                    | (va & ~PAGE_MASK);
-            }
+#ifdef CONFIG_HVM
+            /* Magic MMIO marker: extract gfn for MMIO address */
+            ASSERT(sh_l1e_is_mmio(sl1e));
+            ASSERT(is_hvm_vcpu(v));
+            gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
+                   << PAGE_SHIFT) | (va & ~PAGE_MASK);
             perfc_incr(shadow_fault_fast_mmio);
             SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
             sh_reset_early_unshadow(v);
             trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
-            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
-                    ? EXCRET_fault_fixed : 0);
+            return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
+                   ? EXCRET_fault_fixed : 0;
+#else
+            /* When HVM is not enabled, there shouldn't be MMIO marker */
+            BUG();
+#endif
         }
         else
         {
@@ -3381,8 +3383,10 @@ static int sh_page_fault(struct vcpu *v,
 
     r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
 
+#ifdef CONFIG_HVM
     if ( r == X86EMUL_EXCEPTION )
     {
+        ASSERT(is_hvm_domain(d));
         /*
          * This emulation covers writes to shadow pagetables.  We tolerate #PF
          * (from accesses spanning pages, concurrent paging updated from
@@ -3404,6 +3408,7 @@ static int sh_page_fault(struct vcpu *v,
             r = X86EMUL_UNHANDLEABLE;
         }
     }
+#endif
 
     /*
      * NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
@@ -3513,6 +3518,8 @@ static int sh_page_fault(struct vcpu *v,
  mmio:
     if ( !guest_mode(regs) )
         goto not_a_shadow_fault;
+#ifdef CONFIG_HVM
+    ASSERT(is_hvm_vcpu(v));
     perfc_incr(shadow_fault_mmio);
     sh_audit_gw(v, &gw);
     SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
@@ -3523,6 +3530,9 @@ static int sh_page_fault(struct vcpu *v,
     trace_shadow_gen(TRC_SHADOW_MMIO, va);
     return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
             ? EXCRET_fault_fixed : 0);
+#else
+    BUG();
+#endif
 
  not_a_shadow_fault:
     sh_audit_gw(v, &gw);
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.