
Re: [Xen-devel] [XEN][RFC PATCH 10/15] xc: Add argument to allocate more special pages



On Thu, 2012-03-22 at 15:59 +0000, Julien Grall wrote:
> This patch permits allocating more special pages. Indeed, for multiple
> ioreq servers, we need two shared pages per server.
> 
> xc_hvm_build will take an argument which will indicate the number of
> special pages we want to allocate.

struct xc_hvm_build_args was just added to avoid exactly this sort of
proliferation of arguments; you should add the new count as a field there
instead of extending the prototypes.
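
Something along these lines would keep both prototypes stable (sketch
only, untested; only the relevant members are shown, and nr_ioreq_servers
is just a placeholder for however the caller knows the server count):

struct xc_hvm_build_args {
    uint64_t mem_size;            /* Memory size in bytes. */
    uint64_t mem_target;          /* Memory target in bytes. */
    const char *image_file_name;  /* File name of the image to load. */
    uint32_t nr_special_pages;    /* Extra special pages to reserve, 0 if none. */
};

/* Caller side -- xc_hvm_build() keeps its current signature. */
struct xc_hvm_build_args args = {};

args.mem_size = (uint64_t)memsize << 20;
args.mem_target = (uint64_t)target << 20;
args.image_file_name = image_name;
args.nr_special_pages = 2 * nr_ioreq_servers;  /* two shared pages per server */

if ( xc_hvm_build(xch, domid, &args) != 0 )
    return -1;

setup_guest() could then read args->nr_special_pages instead of growing
its own parameter list as well.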

> 
> Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
> ---
>  tools/libxc/xc_hvm_build.c |   57 +++++++++++++++++++++++++------------------
>  tools/libxc/xenguest.h     |    6 +++-
>  tools/libxc/xg_private.c   |    3 +-
>  3 files changed, 39 insertions(+), 27 deletions(-)
> 
> diff --git a/tools/libxc/xc_hvm_build.c b/tools/libxc/xc_hvm_build.c
> index 696c012..62b4ff1 100644
> --- a/tools/libxc/xc_hvm_build.c
> +++ b/tools/libxc/xc_hvm_build.c
> @@ -47,10 +47,11 @@
>  #define SPECIALPAGE_IDENT_PT 6
>  #define SPECIALPAGE_CONSOLE  7
>  #define NR_SPECIAL_PAGES     8
> -#define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
> +#define special_pfn(x, add) (0xff000u - (NR_SPECIAL_PAGES + (add)) + (x))
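
To spell out what the new macro gives us: the reserved block still ends at
pfn 0xfefff and simply grows downwards, with reserved_mem_pgstart moving
down by the extra count. A standalone check (the choice of four extra
pages, i.e. two ioreq servers at two pages each, is arbitrary and only for
illustration):

#include <assert.h>

#define NR_SPECIAL_PAGES     8
#define SPECIALPAGE_CONSOLE  7
#define special_pfn(x, add) (0xff000u - (NR_SPECIAL_PAGES + (add)) + (x))

int main(void)
{
    assert(special_pfn(0, 4) == 0xfeff4);                        /* new reserved_mem_pgstart */
    assert(special_pfn(SPECIALPAGE_CONSOLE, 4) == 0xfeffb);      /* existing pages keep their order */
    assert(special_pfn(NR_SPECIAL_PAGES, 4) == 0xfeffc);         /* first extra page */
    assert(special_pfn(NR_SPECIAL_PAGES + 4 - 1, 4) == 0xfefff); /* last extra page, just below 0xff000 */
    return 0;
}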
>  
>  static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
> -                           uint64_t mmio_start, uint64_t mmio_size)
> +                           uint64_t mmio_start, uint64_t mmio_size,
> +                           uint32_t nr_special_pages)
>  {
>      struct hvm_info_table *hvm_info = (struct hvm_info_table *)
>          (((unsigned char *)hvm_info_page) + HVM_INFO_OFFSET);
> @@ -78,7 +79,7 @@ static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
>      /* Memory parameters. */
>      hvm_info->low_mem_pgend = lowmem_end >> PAGE_SHIFT;
>      hvm_info->high_mem_pgend = highmem_end >> PAGE_SHIFT;
> -    hvm_info->reserved_mem_pgstart = special_pfn(0);
> +    hvm_info->reserved_mem_pgstart = special_pfn(0, nr_special_pages);
>  
>      /* Finish with the checksum. */
>      for ( i = 0, sum = 0; i < hvm_info->length; i++ )
> @@ -141,7 +142,8 @@ static int check_mmio_hole(uint64_t start, uint64_t memsize,
>  
>  static int setup_guest(xc_interface *xch,
>                         uint32_t dom, const struct xc_hvm_build_args *args,
> -                       char *image, unsigned long image_size)
> +                       char *image, unsigned long image_size,
> +                       uint32_t nr_special_pages)
>  {
>      xen_pfn_t *page_array = NULL;
>      unsigned long i, nr_pages = args->mem_size >> PAGE_SHIFT;
> @@ -334,37 +336,42 @@ static int setup_guest(xc_interface *xch,
>                xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
>                HVM_INFO_PFN)) == NULL )
>          goto error_out;
> -    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size);
> +    build_hvm_info(hvm_info_page, v_end, mmio_start, mmio_size, nr_special_pages);
>      munmap(hvm_info_page, PAGE_SIZE);
>  
>      /* Allocate and clear special pages. */
> -    for ( i = 0; i < NR_SPECIAL_PAGES; i++ )
> +    for ( i = 0; i < (NR_SPECIAL_PAGES + nr_special_pages); i++ )
>      {
> -        xen_pfn_t pfn = special_pfn(i);
> +        xen_pfn_t pfn = special_pfn(i, nr_special_pages);
>          rc = xc_domain_populate_physmap_exact(xch, dom, 1, 0, 0, &pfn);
>          if ( rc != 0 )
>          {
>              PERROR("Could not allocate %d'th special page.", i);
>              goto error_out;
>          }
> -        if ( xc_clear_domain_page(xch, dom, special_pfn(i)) )
> +        if ( xc_clear_domain_page(xch, dom, special_pfn(i, nr_special_pages)) )
>              goto error_out;
>      }
>  
>      xc_set_hvm_param(xch, dom, HVM_PARAM_STORE_PFN,
> -                     special_pfn(SPECIALPAGE_XENSTORE));
> +                     special_pfn(SPECIALPAGE_XENSTORE, nr_special_pages));
>      xc_set_hvm_param(xch, dom, HVM_PARAM_BUFIOREQ_PFN,
> -                     special_pfn(SPECIALPAGE_BUFIOREQ));
> +                     special_pfn(SPECIALPAGE_BUFIOREQ, nr_special_pages));
>      xc_set_hvm_param(xch, dom, HVM_PARAM_IOREQ_PFN,
> -                     special_pfn(SPECIALPAGE_IOREQ));
> +                     special_pfn(SPECIALPAGE_IOREQ, nr_special_pages));
>      xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
> -                     special_pfn(SPECIALPAGE_CONSOLE));
> +                     special_pfn(SPECIALPAGE_CONSOLE, nr_special_pages));
>      xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
> -                     special_pfn(SPECIALPAGE_PAGING));
> +                     special_pfn(SPECIALPAGE_PAGING, nr_special_pages));
>      xc_set_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN,
> -                     special_pfn(SPECIALPAGE_ACCESS));
> +                     special_pfn(SPECIALPAGE_ACCESS, nr_special_pages));
>      xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
> -                     special_pfn(SPECIALPAGE_SHARING));
> +                     special_pfn(SPECIALPAGE_SHARING, nr_special_pages));
> +    xc_set_hvm_param(xch, dom, HVM_PARAM_IO_PFN_FIRST,
> +                     special_pfn(NR_SPECIAL_PAGES, nr_special_pages));
> +    xc_set_hvm_param(xch, dom, HVM_PARAM_IO_PFN_LAST,
> +                     special_pfn(NR_SPECIAL_PAGES + nr_special_pages - 1,
> +                                 nr_special_pages));
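
Presumably a secondary emulator then claims its two pages out of the
advertised range along these lines (sketch only; the "server N takes pages
2*N and 2*N+1" policy and the server_id parameter are assumptions on my
part, not something this patch defines, and it relies on the new params
being exposed via xen/hvm/params.h as usual):

/* Locate the synchronous and buffered ioreq pages for one ioreq server. */
static int get_server_pfns(xc_interface *xch, uint32_t dom, int server_id,
                           xen_pfn_t *ioreq_pfn, xen_pfn_t *bufioreq_pfn)
{
    unsigned long first, last;

    if ( xc_get_hvm_param(xch, dom, HVM_PARAM_IO_PFN_FIRST, &first) ||
         xc_get_hvm_param(xch, dom, HVM_PARAM_IO_PFN_LAST, &last) )
        return -1;

    *ioreq_pfn    = first + 2 * server_id;      /* synchronous ioreq page */
    *bufioreq_pfn = first + 2 * server_id + 1;  /* buffered ioreq page */

    /* Not enough special pages were reserved at build time. */
    if ( *bufioreq_pfn > last )
        return -1;

    return 0;
}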
>  
>      /*
>       * Identity-map page table is required for running with CR0.PG=0 when
> @@ -372,14 +379,14 @@ static int setup_guest(xc_interface *xch,
>       */
>      if ( (ident_pt = xc_map_foreign_range(
>                xch, dom, PAGE_SIZE, PROT_READ | PROT_WRITE,
> -              special_pfn(SPECIALPAGE_IDENT_PT))) == NULL )
> +              special_pfn(SPECIALPAGE_IDENT_PT, nr_special_pages))) == NULL )
>          goto error_out;
>      for ( i = 0; i < PAGE_SIZE / sizeof(*ident_pt); i++ )
>          ident_pt[i] = ((i << 22) | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
>                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
>      munmap(ident_pt, PAGE_SIZE);
>      xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT,
> -                     special_pfn(SPECIALPAGE_IDENT_PT) << PAGE_SHIFT);
> +                     special_pfn(SPECIALPAGE_IDENT_PT, nr_special_pages) << PAGE_SHIFT);
>  
>      /* Insert JMP <rel32> instruction at address 0x0 to reach entry point. */
>      entry_eip = elf_uval(&elf, elf.ehdr, e_entry);
> @@ -406,7 +413,8 @@ static int setup_guest(xc_interface *xch,
>   * Create a domain for a virtualized Linux, using files/filenames.
>   */
>  int xc_hvm_build(xc_interface *xch, uint32_t domid,
> -                 const struct xc_hvm_build_args *hvm_args)
> +                 const struct xc_hvm_build_args *hvm_args,
> +                 uint32_t nr_special_pages)
>  {
>      struct xc_hvm_build_args args = *hvm_args;
>      void *image;
> @@ -432,7 +440,7 @@ int xc_hvm_build(xc_interface *xch, uint32_t domid,
>      if ( image == NULL )
>          return -1;
>  
> -    sts = setup_guest(xch, domid, &args, image, image_size);
> +    sts = setup_guest(xch, domid, &args, image, image_size, nr_special_pages);
>  
>      free(image);
>  
> @@ -447,10 +455,11 @@ int xc_hvm_build(xc_interface *xch, uint32_t domid,
>   * If target == memsize, pages are populated normally.
>   */
>  int xc_hvm_build_target_mem(xc_interface *xch,
> -                           uint32_t domid,
> -                           int memsize,
> -                           int target,
> -                           const char *image_name)
> +                            uint32_t domid,
> +                            int memsize,
> +                            int target,
> +                            const char *image_name,
> +                            uint32_t nr_special_pages)
>  {
>      struct xc_hvm_build_args args = {};
>  
> @@ -458,7 +467,7 @@ int xc_hvm_build_target_mem(xc_interface *xch,
>      args.mem_target = (uint64_t)target << 20;
>      args.image_file_name = image_name;
>  
> -    return xc_hvm_build(xch, domid, &args);
> +    return xc_hvm_build(xch, domid, &args, nr_special_pages);
>  }
>  
>  /*
> diff --git a/tools/libxc/xenguest.h b/tools/libxc/xenguest.h
> index 8d885d3..092ee24 100644
> --- a/tools/libxc/xenguest.h
> +++ b/tools/libxc/xenguest.h
> @@ -188,13 +188,15 @@ struct xc_hvm_build_args {
>   * are optional.
>   */
>  int xc_hvm_build(xc_interface *xch, uint32_t domid,
> -                 const struct xc_hvm_build_args *hvm_args);
> +                 const struct xc_hvm_build_args *hvm_args,
> +                 uint32_t nr_special_pages);
>  
>  int xc_hvm_build_target_mem(xc_interface *xch,
>                              uint32_t domid,
>                              int memsize,
>                              int target,
> -                            const char *image_name);
> +                            const char *image_name,
> +                            uint32_t nr_special_pages);
>  
> int xc_suspend_evtchn_release(xc_interface *xch, xc_evtchn *xce, int domid, int suspend_evtchn);
>  
> diff --git a/tools/libxc/xg_private.c b/tools/libxc/xg_private.c
> index 3864bc7..e74adce 100644
> --- a/tools/libxc/xg_private.c
> +++ b/tools/libxc/xg_private.c
> @@ -192,7 +192,8 @@ unsigned long csum_page(void *page)
>  __attribute__((weak)) 
>      int xc_hvm_build(xc_interface *xch,
>                       uint32_t domid,
> -                     const struct xc_hvm_build_args *hvm_args)
> +                     const struct xc_hvm_build_args *hvm_args,
> +                     uint32_t nr_servers)
>  {
>      errno = ENOSYS;
>      return -1;



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

