
Re: [Xen-devel] [PATCH 25/34] x86/mm/shadow: make it build with !CONFIG_HVM



On Fri, Aug 17, 2018 at 04:12:43PM +0100, Wei Liu wrote:
> Enclose HVM-only emulation code under CONFIG_HVM. Add some BUG()s to
> catch any issue.
> 
> Note that although some code checks is_hvm_*, which hints that it can
> be called for PV as well, I can't find such paths.
> 
> Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
> ---
>  xen/arch/x86/mm/shadow/common.c | 18 ++++++++++++++++--
>  xen/arch/x86/mm/shadow/multi.c  | 27 +++++++++++++++++++++------
>  2 files changed, 37 insertions(+), 8 deletions(-)
> 
> diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
> index 0856650..4381538 100644
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -113,6 +113,7 @@ __initcall(shadow_audit_key_init);
>  #endif /* SHADOW_AUDIT */
>  
>  
> +#if CONFIG_HVM
>  /**************************************************************************/
>  /* x86 emulator support for the shadow code
>   */
> @@ -380,11 +381,13 @@ static const struct x86_emulate_ops hvm_shadow_emulator_ops = {
>      .cmpxchg    = hvm_emulate_cmpxchg,
>      .cpuid      = hvmemul_cpuid,
>  };
> +#endif
>  
>  const struct x86_emulate_ops *shadow_init_emulation(
>      struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs,
>      unsigned int pte_size)
>  {
> +#if CONFIG_HVM
>      struct segment_register *creg, *sreg;
>      struct vcpu *v = current;
>      unsigned long addr;
> @@ -423,6 +426,10 @@ const struct x86_emulate_ops *shadow_init_emulation(
>          ? sizeof(sh_ctxt->insn_buf) : 0;
>  
>      return &hvm_shadow_emulator_ops;
> +#else
> +    BUG();

I would usually use ASSERT_UNREACHABLE() in such situations.
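I.e. something like this (just a sketch of the suggested alternative,
keeping the NULL return from your patch so release builds still get a
well-defined value):

    #else
        ASSERT_UNREACHABLE();
        return NULL;
    #endif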

And here I wonder whether this can in fact be called from a PV path.
AFAICT the following call chain exists:

do_page_fault -> fixup_page_fault -> paging_fault -> page_fault
(sh_page_fault) -> shadow_init_emulation

But maybe there are other conditions that make this path actually
unreachable (or maybe something in your series changed this path).
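
If the PV path really is dead, it might be worth making that explicit
at the call site as well, e.g. (untested sketch, assuming the existing
call in sh_page_fault()):

    /* sh_page_fault(): the emulation path should be HVM-only. */
    ASSERT(is_hvm_vcpu(v));
    emul_ops = shadow_init_emulation(&emul_ctxt, regs, GUEST_PTE_SIZE);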

> +    return NULL;
> +#endif
>  }
>  
>  /* Update an initialized emulation context to prepare for the next
> @@ -430,6 +437,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
>  void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
>                                 struct cpu_user_regs *regs)
>  {
> +#if CONFIG_HVM
>      unsigned long addr, diff;
>  
>      ASSERT(is_hvm_vcpu(current));
> @@ -451,6 +459,9 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
>              ? sizeof(sh_ctxt->insn_buf) : 0;
>          sh_ctxt->insn_buf_eip = regs->rip;
>      }
> +#else
> +    BUG();
> +#endif
>  }
>  
>  
> @@ -1685,6 +1696,7 @@ static unsigned int shadow_get_allocation(struct domain *d)
>              + ((pg & ((1 << (20 - PAGE_SHIFT)) - 1)) ? 1 : 0));
>  }
>  
> +#if CONFIG_HVM
>  /**************************************************************************/
>  /* Handling guest writes to pagetables. */
>  
> @@ -1957,6 +1969,7 @@ static void sh_emulate_unmap_dest(struct vcpu *v, void *addr,
>  
>      atomic_inc(&v->domain->arch.paging.shadow.gtable_dirty_version);
>  }
> +#endif
>  
>  /**************************************************************************/
>  /* Hash table for storing the guest->shadow mappings.
> @@ -2723,12 +2736,13 @@ static int sh_remove_all_mappings(struct domain *d, mfn_t gmfn, gfn_t gfn)
>                 && (page->count_info & PGC_count_mask) <= 3
>                 && ((page->u.inuse.type_info & PGT_count_mask)
>                     == (is_xen_heap_page(page) ||
> -                       is_ioreq_server_page(d, page)))) )
> +                       (is_hvm_domain(d) && is_ioreq_server_page(d, page))))) )

Isn't this a separate bugfix? is_ioreq_server_page shouldn't be called
for PV domains at all (same below).
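
If that change is split out, the check could also be folded into one
place instead of open-coding is_hvm_domain() at each call site, e.g.
(sketch only; page_is_ioreq_server() is a made-up name):

    /* Hypothetical wrapper: is_ioreq_server_page() is HVM-only. */
    static inline bool page_is_ioreq_server(struct domain *d,
                                            const struct page_info *page)
    {
        return is_hvm_domain(d) && is_ioreq_server_page(d, page);
    }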

>          {
>              SHADOW_ERROR("can't find all mappings of mfn %lx (gfn %lx): "
>                            "c=%lx t=%lx x=%d i=%d\n", mfn_x(gmfn), gfn_x(gfn),
>                            page->count_info, page->u.inuse.type_info,
> -                          !!is_xen_heap_page(page), is_ioreq_server_page(d, 
> page));
> +                          !!is_xen_heap_page(page),
> +                         is_hvm_domain(d) && is_ioreq_server_page(d, page));
>          }
>      }
>  
> diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
> index 021ae25..ff7a20c 100644
> --- a/xen/arch/x86/mm/shadow/multi.c
> +++ b/xen/arch/x86/mm/shadow/multi.c
> @@ -2926,18 +2926,25 @@ static int sh_page_fault(struct vcpu *v,
>              }
>              else
>              {
> +#if CONFIG_HVM
>                  /* Magic MMIO marker: extract gfn for MMIO address */
>                  ASSERT(sh_l1e_is_mmio(sl1e));
> +                ASSERT(is_hvm_vcpu(v));
>                  gpa = (((paddr_t)(gfn_x(sh_l1e_mmio_get_gfn(sl1e))))
>                         << PAGE_SHIFT)
>                      | (va & ~PAGE_MASK);
> +                perfc_incr(shadow_fault_fast_mmio);
> +                SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
> +                sh_reset_early_unshadow(v);
> +                trace_shadow_gen(TRC_SHADOW_FAST_MMIO, va);
> +                return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT,
> +                                                    access)
> +                    ? EXCRET_fault_fixed : 0;

Could you align the '?' with 'handle'?
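I.e. (indentation change only):

    return handle_mmio_with_translation(va, gpa >> PAGE_SHIFT, access)
           ? EXCRET_fault_fixed : 0;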

Thanks, Roger.

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

