[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH v7 29/32] libxc/xen: introduce a start info structure for HVMlite guests



On 02/10/15 16:49, Roger Pau Monne wrote:
> This structure contains the physical address of the command line, as well as
> the physical address of the list of loaded modules. The physical address of
> this structure is passed to the guest at boot time in the %ebx register.
>
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> Cc: Ian Campbell <ian.campbell@xxxxxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> ---
> Changes since v6:
>  - Add a check to make sure the start info data is placed below 4GB.
>  - Make sure byte addresses are treated as uintptr_t.
>  - Fix single-line comment.
>
> Changes since v5:
>  - Change some of the calculations performed to get the total size of the
>    start_info region.
>  - Replace the mention of HVMlite in a comment with PVH.
>  - Don't use 64bit integers in hvm_modlist_entry.
> ---
>  tools/libxc/xc_dom_x86.c | 68 
> +++++++++++++++++++++++++++++++++++++++++++++++-
>  xen/include/public/xen.h | 17 ++++++++++++
>  2 files changed, 84 insertions(+), 1 deletion(-)
>
> diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
> index 85b8288..20a39b7 100644
> --- a/tools/libxc/xc_dom_x86.c
> +++ b/tools/libxc/xc_dom_x86.c
> @@ -561,7 +561,70 @@ static int alloc_magic_pages_hvm(struct xc_dom_image 
> *dom)
>      xc_hvm_param_set(xch, domid, HVM_PARAM_SHARING_RING_PFN,
>                       special_pfn(SPECIALPAGE_SHARING));
>  
> -    if ( dom->device_model )
> +    if ( !dom->device_model )
> +    {
> +        struct xc_dom_seg seg;
> +        struct hvm_start_info *start_info;
> +        char *cmdline;
> +        struct hvm_modlist_entry *modlist;
> +        void *start_page;
> +        size_t cmdline_size = 0;
> +        size_t start_info_size = sizeof(*start_info);
> +
> +        if ( dom->cmdline )
> +        {
> +            cmdline_size = ROUNDUP(strlen(dom->cmdline) + 1, 8);
> +            start_info_size += cmdline_size;
> +
> +        }
> +        if ( dom->ramdisk_blob )
> +            start_info_size += sizeof(*modlist); /* Limited to one module. */
> +
> +        rc = xc_dom_alloc_segment(dom, &seg, "HVMlite start info", 0,
> +                                  start_info_size);
> +        if ( rc != 0 )
> +        {
> +            DOMPRINTF("Unable to reserve memory for the start info");
> +            goto out;
> +        }
> +
> +        start_page = xc_map_foreign_range(xch, domid, start_info_size,
> +                                          PROT_READ | PROT_WRITE,
> +                                          seg.pfn);
> +        if ( start_page == NULL )
> +        {
> +            DOMPRINTF("Unable to map HVM start info page");
> +            goto error_out;
> +        }
> +
> +        start_info = start_page;

You should clear start_info here for sanity's sake.  There is nothing
which requires the mapped memory to be cleared.

> +        cmdline = start_page + sizeof(*start_info);
> +        modlist = start_page + sizeof(*start_info) + cmdline_size;
> +
> +        if ( dom->cmdline )
> +        {
> +            strncpy(cmdline, dom->cmdline, MAX_GUEST_CMDLINE);
> +            cmdline[MAX_GUEST_CMDLINE - 1] = '\0';
> +            start_info->cmdline_paddr = (seg.pfn << PAGE_SHIFT) +
> +                                ((uintptr_t)cmdline - (uintptr_t)start_info);
> +        }
> +
> +        if ( dom->ramdisk_blob )
> +        {
> +            modlist[0].paddr = dom->ramdisk_seg.vstart - 
> dom->parms.virt_base;
> +            modlist[0].size = dom->ramdisk_seg.vend - 
> dom->ramdisk_seg.vstart;
> +            start_info->modlist_paddr = (seg.pfn << PAGE_SHIFT) +
> +                                ((uintptr_t)modlist - (uintptr_t)start_info);
> +            start_info->nr_modules = 1;
> +        }
> +
> +        start_info->magic = HVM_START_MAGIC_VALUE;
> +
> +        munmap(start_page, start_info_size);
> +
> +        dom->start_info_pfn = seg.pfn;
> +    }
> +    else
>      {
>          /*
>           * Allocate and clear additional ioreq server pages. The default
> @@ -915,6 +978,9 @@ static int vcpu_hvm(struct xc_dom_image *dom)
>      /* Set the IP. */
>      bsp_ctx.cpu.rip = dom->parms.phys_entry;
>  
> +    if ( dom->start_info_pfn )
> +        bsp_ctx.cpu.rbx = dom->start_info_pfn << PAGE_SHIFT;
> +
>      /* Set the end descriptor. */
>      bsp_ctx.end_d.typecode = HVM_SAVE_CODE(END);
>      bsp_ctx.end_d.instance = 0;
> diff --git a/xen/include/public/xen.h b/xen/include/public/xen.h
> index ff5547e..709e12c 100644
> --- a/xen/include/public/xen.h
> +++ b/xen/include/public/xen.h
> @@ -784,6 +784,23 @@ struct start_info {
>  };
>  typedef struct start_info start_info_t;
>  
> +/* Start of day structure passed to PVH guests in %ebx. */
> +struct hvm_start_info {
> +#define HVM_START_MAGIC_VALUE 0x336ec578
> +    uint32_t magic;             /* Contains the magic value 0x336ec578       
> */
> +                                /* ("xEn3" with the 0x80 bit of the "E" 
> set).*/
> +    uint32_t flags;             /* SIF_xxx flags.                            
> */
> +    uint32_t cmdline_paddr;     /* Physical address of the command line.     
> */
> +    uint32_t nr_modules;        /* Number of modules passed to the kernel.   
> */
> +    uint32_t modlist_paddr;     /* Physical address of an array of           
> */
> +                                /* hvm_modlist_entry.                        
> */

We should state that nothing will be loaded at 0, so a paddr of 0 means
"not present".

Otherwise,

Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.