[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [V1 PATCH 2/5] PVH xen tools: libxc changes to build a PVH guest.



On Wed, 2013-09-04 at 18:32 -0700, Mukesh Rathor wrote:
> Signed-off-by: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
> ---
>  tools/libxc/xc_dom.h      |    1 +
>  tools/libxc/xc_dom_core.c |    8 ++++
>  tools/libxc/xc_dom_x86.c  |   83 
> ++++++++++++++++++++++++++++-----------------
>  3 files changed, 61 insertions(+), 31 deletions(-)
> 
> diff --git a/tools/libxc/xc_dom.h b/tools/libxc/xc_dom.h
> index 86e23ee..5168bcd 100644
> --- a/tools/libxc/xc_dom.h
> +++ b/tools/libxc/xc_dom.h
> @@ -130,6 +130,7 @@ struct xc_dom_image {
>      domid_t console_domid;
>      domid_t xenstore_domid;
>      xen_pfn_t shared_info_mfn;
> +    int pvh_enabled;
>  
>      xc_interface *xch;
>      domid_t guest_domid;
> diff --git a/tools/libxc/xc_dom_core.c b/tools/libxc/xc_dom_core.c
> index 0f367f6..6ebe7ec 100644
> --- a/tools/libxc/xc_dom_core.c
> +++ b/tools/libxc/xc_dom_core.c
> @@ -766,6 +766,14 @@ int xc_dom_parse_image(struct xc_dom_image *dom)
>          goto err;
>      }
>  
> +    if ( dom->pvh_enabled )
> +    {
> +        char *pvh_features = "writable_descriptor_tables|"

Please make that a const char *, or just inline the string literal in the call.

> +                             
> "auto_translated_physmap|supervisor_mode_kernel|"
> +                             "hvm_callback_vector";
> +        elf_xen_parse_features(pvh_features, dom->f_requested, NULL);

I suppose this doesn't nobble anything which the user passed in which
was previously parsed into dom->f_requested?

In any case this is now internal to the libxc API so we can change it to
fix it as we please.

> +    }
> +
>      /* check features */
>      for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
>      {
> diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
> index 126c0f8..5b2d61d 100644
> --- a/tools/libxc/xc_dom_x86.c
> +++ b/tools/libxc/xc_dom_x86.c

I don't seem to see anything which would reject PVH on x86_32 at the
moment?

> @@ -415,7 +415,8 @@ static int setup_pgtables_x86_64(struct xc_dom_image *dom)
>          pgpfn = (addr - dom->parms.virt_base) >> PAGE_SHIFT_X86;
>          l1tab[l1off] =
>              pfn_to_paddr(xc_dom_p2m_guest(dom, pgpfn)) | L1_PROT;
> -        if ( (addr >= dom->pgtables_seg.vstart) && 
> +        if ( (!dom->pvh_enabled)                &&
> +             (addr >= dom->pgtables_seg.vstart) &&
>               (addr < dom->pgtables_seg.vend) )
>              l1tab[l1off] &= ~_PAGE_RW; /* page tables are r/o */
>          if ( l1off == (L1_PAGETABLE_ENTRIES_X86_64 - 1) )
> @@ -629,12 +630,6 @@ static int vcpu_x86_64(struct xc_dom_image *dom, void 
> *ptr)
>      /* clear everything */
>      memset(ctxt, 0, sizeof(*ctxt));
>  
> -    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
> -    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
> -    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
> -    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
> -    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
> -    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
>      ctxt->user_regs.rip = dom->parms.virt_entry;
>      ctxt->user_regs.rsp =
>          dom->parms.virt_base + (dom->bootstack_pfn + 1) * PAGE_SIZE_X86;
> @@ -642,15 +637,25 @@ static int vcpu_x86_64(struct xc_dom_image *dom, void 
> *ptr)
>          dom->parms.virt_base + (dom->start_info_pfn) * PAGE_SIZE_X86;
>      ctxt->user_regs.rflags = 1 << 9; /* Interrupt Enable */
>  
> -    ctxt->kernel_ss = ctxt->user_regs.ss;
> -    ctxt->kernel_sp = ctxt->user_regs.esp;
> -
>      ctxt->flags = VGCF_in_kernel_X86_64 | VGCF_online_X86_64;
>      cr3_pfn = xc_dom_p2m_guest(dom, dom->pgtables_seg.pfn);
>      ctxt->ctrlreg[3] = xen_pfn_to_cr3_x86_64(cr3_pfn);
>      DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
>                __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
>  
> +    if ( dom->pvh_enabled )
> +        return 0;
> +
> +    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
> +    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
> +
> +    ctxt->kernel_ss = ctxt->user_regs.ss;
> +    ctxt->kernel_sp = ctxt->user_regs.esp;
> +
>      return 0;
>  }
>  
> @@ -751,7 +756,7 @@ int arch_setup_meminit(struct xc_dom_image *dom)
>      rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
>      if ( rc )
>          return rc;
> -    if ( xc_dom_feature_translated(dom) )
> +    if ( xc_dom_feature_translated(dom) && !dom->pvh_enabled )
>      {
>          dom->shadow_enabled = 1;
>          rc = x86_shadow(dom->xch, dom->guest_domid);
> @@ -827,6 +832,38 @@ int arch_setup_bootearly(struct xc_dom_image *dom)
>      return 0;
>  }
>  
> +/*
> + * Map grant table frames into guest physmap. Skipped for PVH, which
> + * manages grant tables during boot via HVM mechanisms.
> + */
> +static int map_grant_table_frames(struct xc_dom_image *dom)
> +{
> +    int i, rc;
> +
> +    if ( dom->pvh_enabled )
> +        return 0;
> +
> +    for ( i = 0; ; i++ )
> +    {
> +        rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
> +                                      XENMAPSPACE_grant_table,
> +                                      i, dom->total_pages + i);
> +        if ( rc != 0 )
> +        {
> +            if ( (i > 0) && (errno == EINVAL) )
> +            {
> +                DOMPRINTF("%s: %d grant tables mapped", __FUNCTION__, i);
> +                break;
> +            }
> +            xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> +                         "%s: mapping grant tables failed " "(pfn=0x%" PRIpfn
> +                         ", rc=%d)", __FUNCTION__, dom->total_pages + i, rc);
> +            return rc;
> +        }
> +    }
> +    return 0;
> +}
> +
>  int arch_setup_bootlate(struct xc_dom_image *dom)
>  {
>      static const struct {
> @@ -865,7 +902,6 @@ int arch_setup_bootlate(struct xc_dom_image *dom)
>      else
>      {
>          /* paravirtualized guest with auto-translation */
> -        int i;
>  
>          /* Map shared info frame into guest physmap. */
>          rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
> @@ -879,25 +915,10 @@ int arch_setup_bootlate(struct xc_dom_image *dom)
>              return rc;
>          }
>  
> -        /* Map grant table frames into guest physmap. */
> -        for ( i = 0; ; i++ )
> -        {
> -            rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
> -                                          XENMAPSPACE_grant_table,
> -                                          i, dom->total_pages + i);
> -            if ( rc != 0 )
> -            {
> -                if ( (i > 0) && (errno == EINVAL) )
> -                {
> -                    DOMPRINTF("%s: %d grant tables mapped", __FUNCTION__, i);
> -                    break;
> -                }
> -                xc_dom_panic(dom->xch, XC_INTERNAL_ERROR,
> -                             "%s: mapping grant tables failed " "(pfn=0x%"
> -                             PRIpfn ", rc=%d)", __FUNCTION__, 
> dom->total_pages + i, rc);
> -                return rc;
> -            }
> -        }
> +        rc = map_grant_table_frames(dom);
> +        if ( rc != 0 )
> +            return rc;
> +
>          shinfo = dom->shared_info_pfn;
>      }
>  



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.