
[Xen-devel] [PATCH 25/34] x86/mm/shadow: make it build with !CONFIG_HVM

Enclose HVM-only emulation code under CONFIG_HVM. Add some BUG()s to
catch any issues.

Note that although some code checks is_hvm_*, which hints that it can
be called for PV guests as well, I can't find any such path.

Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 xen/arch/x86/mm/shadow/common.c | 18 ++++++++++++++++--
 xen/arch/x86/mm/shadow/multi.c  | 27 +++++++++++++++++++++------
 2 files changed, 37 insertions(+), 8 deletions(-)
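(Not part of the patch itself, just for illustration: the pattern applied
throughout is sketched below. sh_hvm_only_op() is a made-up name and the
body is a stub; the real hunks follow the same shape.)

/* Hypothetical sketch of the pattern this patch applies: keep a single
 * definition, compile the HVM-only body out of !CONFIG_HVM builds, and
 * leave a BUG() stub so any unexpected caller is caught loudly, since
 * no PV path should reach it. */
int sh_hvm_only_op(struct vcpu *v)
{
#ifdef CONFIG_HVM
    ASSERT(is_hvm_vcpu(v));
    /* ... HVM-only emulation work ... */
    return 0;
#else
    BUG();
    return -EOPNOTSUPP; /* unreachable */
#endif
}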

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 0856650..4381538 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -113,6 +113,7 @@ __initcall(shadow_audit_key_init);
 #endif /* SHADOW_AUDIT */
 
 
+#ifdef CONFIG_HVM
 /**************************************************************************/
 /* x86 emulator support for the shadow code
  */
@@ -380,11 +381,13 @@ static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
     .cmpxchg    = hvm_emulate_cmpxchg,
     .cpuid      = hvmemul_cpuid,
 };
+#endif
 
 const struct x86_emulate_ops *shadow_init_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
     unsigned int pte_size)
 {
+#ifdef CONFIG_HVM
     struct segment_register *creg, *sreg;
     struct vcpu *v = current;
     unsigned long addr;
@@ -423,6 +426,10 @@ const struct x86_emulate_ops *shadow_init_emulation(
         ? sizeof(sh_ctxt->insn_buf) : 0;
 
     return &hvm_shadow_emulator_ops;
+#else
+    BUG();
+    return NULL;
+#endif
 }
 
 /* Update an initialized emulation context to prepare for the next
@@ -430,6 +437,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
 void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
                                struct cpu_user_regs *regs)
 {
+#ifdef CONFIG_HVM
     unsigned long addr, diff;
 
     ASSERT(is_hvm_vcpu(current));
@@ -451,6 +459,9 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
             ? sizeof(sh_ctxt->insn_buf) : 0;
         sh_ctxt->insn_buf_eip = regs->rip;
     }
+#else
+    BUG();
+#endif
 }
 
 
@@ -1685,6 +1696,7 @@ static unsigned int shadow_get_allocation(struct domain *d)
             + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
 }
 
+#ifdef CONFIG_HVM
 /**************************************************************************/
 /* Handling guest writes to pagetables. */
 
@@ -1957,6 +1969,7 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
 
     atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
 }
+#endif
 
 /**************************************************************************/
 /* Hash table for storing the guest->shadow mappings.
@@ -2723,12 +2736,13 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn)
                && (page->count_info & PGC_count_mask) <= 3
                && ((page->u.inuse.type_info & PGT_count_mask)
                    == (is_xen_heap_page(page) ||
-                       is_ioreq_server_page(d, page)))) )
+                       (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )
         {
             SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
                           "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn_x(gfn),
                           page->count_info, page->u.inuse.type_info,
-                          !!is_xen_heap_page(page), is_ioreq_server_page(d, page));
+                          !!is_xen_heap_page(page),
+                          is_hvm_domain(d) && is_ioreq_server_page(d, page));
         }
     }
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 021ae25..ff7a20c 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2926,18 +2926,25 @@ static int sh_page_fault(struct vcpu *v,
             }
             else
             {
+#ifdef CONFIG_HVM
                 /* Magic MMIO marker: extract gfn for MMIO address */
                 ASSERT(sh_l1e_is_mmio(sl1e));
+                ASSERT(is_hvm_vcpu(v));
                 gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
                        << PAGE_SHIFT)
                     | (va & ~PAGE_MASK);
+                perfc_incr(shadow_fault_fast_mmio);
+                SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
+                sh_reset_early_unshadow(v);
+                trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
+                return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT,
+                                                    access)
+                    ? EXCRET_fault_fixed : 0;
+#else
+                /* When HVM is not enabled, there shouldn't be any MMIO marker */
+                BUG();
+#endif
             }
-            perfc_incr(shadow_fault_fast_mmio);
-            SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
-            sh_reset_early_unshadow(v);
-            trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
-            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
-                    ? EXCRET_fault_fixed : 0);
         }
         else
         {
@@ -3381,8 +3388,10 @@ static int sh_page_fault(struct vcpu *v,
 
     r = x86_emulate(&emul_ctxt.ctxt, emul_ops);
 
+#ifdef CONFIG_HVM
     if ( r == X86EMUL_EXCEPTION )
     {
+        ASSERT(is_hvm_domain(d));
         /*
          * This emulation covers writes to shadow pagetables.  We tolerate #PF
          * (from accesses spanning pages, concurrent paging updated from
@@ -3404,6 +3413,7 @@ static int sh_page_fault(struct vcpu *v,
             r = X86EMUL_UNHANDLEABLE;
         }
     }
+#endif
 
     /*
      * NB. We do not unshadow on X86EMUL_EXCEPTION. It's not clear that it
@@ -3513,6 +3523,8 @@ static int sh_page_fault(struct vcpu *v,
  mmio:
     if ( !guest_mode(regs) )
         goto not_a_shadow_fault;
+#ifdef CONFIG_HVM
+    ASSERT(is_hvm_vcpu(v));
     perfc_incr(shadow_fault_mmio);
     sh_audit_gw(v, &gw);
     SHADOW_PRINTK("mmio %#"PRIpaddr"\n", gpa);
@@ -3523,6 +3535,9 @@ static int sh_page_fault(struct vcpu *v,
     trace_shadow_gen(TRC_SHADOW_MMIO, va);
     return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
             ? EXCRET_fault_fixed : 0);
+#else
+    BUG();
+#endif
 
  not_a_shadow_fault:
     sh_audit_gw(v, &gw);
-- 
git-series 0.9.1
