
Re: [PATCH v2 3/5] xen/domctl, tools: Introduce a new domctl to get guest memory map


  • To: Henry Wang <xin.wang2@xxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Michal Orzel <michal.orzel@xxxxxxx>
  • Date: Mon, 11 Mar 2024 10:10:57 +0100
  • Cc: Wei Liu <wl@xxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, "Juergen Gross" <jgross@xxxxxxxx>, Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, "George Dunlap" <george.dunlap@xxxxxxxxxx>, Jan Beulich <jbeulich@xxxxxxxx>, "Julien Grall" <julien@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, "Bertrand Marquis" <bertrand.marquis@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
  • Delivery-date: Mon, 11 Mar 2024 09:11:33 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Hi Henry,

On 08/03/2024 02:54, Henry Wang wrote:
> There are some use cases where the toolstack needs to know the guest
> memory map. For example, the toolstack helper application
> "init-dom0less" needs to know the guest magic page regions for 1:1
> direct-mapped dom0less DomUs to allocate magic pages.
> 
> To address such needs, add XEN_DOMCTL_get_mem_map hypercall and
> related data structures to query the hypervisor for the guest memory
> map. The guest memory map is recorded in the domain structure and
> currently only guest magic page region is recorded in the guest
> memory map. The guest magic page region is initialized at the domain
> creation time as the layout in the public header, and it is updated
> for 1:1 dom0less DomUs (see the following commit) to avoid conflict
> with RAM.
> 
> Take the opportunity to drop an unnecessary empty line to keep the
> coding style consistent in the file.
> 
> Reported-by: Alec Kwapis <alec.kwapis@xxxxxxxxxxxxx>
> Signed-off-by: Henry Wang <xin.wang2@xxxxxxx>
> ---
> v2:
> - New patch
> RFC: I think the newly introduced "struct xen_domctl_mem_map" largely
> duplicates "struct xen_memory_map"; any comments on reusing
> "struct xen_memory_map" for simplicity?
> ---
>  tools/include/xenctrl.h           |  4 ++++
>  tools/libs/ctrl/xc_domain.c       | 32 +++++++++++++++++++++++++++++++
>  xen/arch/arm/domain.c             |  6 ++++++
>  xen/arch/arm/domctl.c             | 19 +++++++++++++++++-
>  xen/arch/arm/include/asm/domain.h |  8 ++++++++
>  xen/include/public/arch-arm.h     |  4 ++++
>  xen/include/public/domctl.h       | 21 ++++++++++++++++++++
>  7 files changed, 93 insertions(+), 1 deletion(-)
> 
> diff --git a/tools/include/xenctrl.h b/tools/include/xenctrl.h
> index 2ef8b4e054..b25e9772a2 100644
> --- a/tools/include/xenctrl.h
> +++ b/tools/include/xenctrl.h
> @@ -1195,6 +1195,10 @@ int xc_domain_setmaxmem(xc_interface *xch,
>                          uint32_t domid,
>                          uint64_t max_memkb);
>  
> +int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
> +                          struct xen_mem_region mem_regions[],
> +                          uint32_t *nr_regions);
> +
>  int xc_domain_set_memmap_limit(xc_interface *xch,
>                                 uint32_t domid,
>                                 unsigned long map_limitkb);
> diff --git a/tools/libs/ctrl/xc_domain.c b/tools/libs/ctrl/xc_domain.c
> index f2d9d14b4d..64b46bdfb4 100644
> --- a/tools/libs/ctrl/xc_domain.c
> +++ b/tools/libs/ctrl/xc_domain.c
> @@ -697,6 +697,38 @@ int xc_domain_setmaxmem(xc_interface *xch,
>      return do_domctl(xch, &domctl);
>  }
>  
> +int xc_get_domain_mem_map(xc_interface *xch, uint32_t domid,
> +                          struct xen_mem_region mem_regions[],
> +                          uint32_t *nr_regions)
> +{
> +    int rc;
> +    struct xen_domctl domctl = {
> +        .cmd         = XEN_DOMCTL_get_mem_map,
> +        .domain      = domid,
> +        .u.mem_map = {
> +            .nr_mem_regions = *nr_regions,
> +        },
> +    };
> +
> +    DECLARE_HYPERCALL_BOUNCE(mem_regions,
> +                             sizeof(xen_mem_region_t) * (*nr_regions),
> +                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
> +
> +    if ( !mem_regions || xc_hypercall_bounce_pre(xch, mem_regions) ||
> +         (*nr_regions) < 1 )
> +        return -1;
> +
> +    set_xen_guest_handle(domctl.u.mem_map.buffer, mem_regions);
> +
> +    rc = do_domctl(xch, &domctl);
> +
> +    xc_hypercall_bounce_post(xch, mem_regions);
> +
> +    *nr_regions = domctl.u.mem_map.nr_mem_regions;
> +
> +    return rc;
> +}
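(Aside, for reference: a hypothetical caller of this new helper would presumably
look roughly like the below, with nr_regions acting as IN/OUT; variable names are
made up purely for illustration:

    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS];
    uint32_t nr_regions = XEN_MAX_MEM_REGIONS;

    if ( xc_get_domain_mem_map(xch, domid, regions, &nr_regions) )
        /* error handling */;
)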
> +
>  #if defined(__i386__) || defined(__x86_64__)
>  int xc_domain_set_memory_map(xc_interface *xch,
>                                 uint32_t domid,
> diff --git a/xen/arch/arm/domain.c b/xen/arch/arm/domain.c
> index 5e7a7f3e7e..54f3601ab0 100644
> --- a/xen/arch/arm/domain.c
> +++ b/xen/arch/arm/domain.c
> @@ -696,6 +696,7 @@ int arch_domain_create(struct domain *d,
>  {
>      unsigned int count = 0;
>      int rc;
> +    struct mem_map_domain *mem_map = &d->arch.mem_map;
>  
>      BUILD_BUG_ON(GUEST_MAX_VCPUS < MAX_VIRT_CPUS);
>  
> @@ -785,6 +786,11 @@ int arch_domain_create(struct domain *d,
>      d->arch.sve_vl = config->arch.sve_vl;
>  #endif
>  
> +    mem_map->regions[mem_map->nr_mem_regions].start = GUEST_MAGIC_BASE;
You don't check for exceeding the max number of regions. Is the expectation that
nr_mem_regions is 0 at this stage? Maybe add an ASSERT here.
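Something like the below (just a sketch) is what I have in mind, before filling
the entry:

    ASSERT(mem_map->nr_mem_regions < XEN_MAX_MEM_REGIONS);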

> +    mem_map->regions[mem_map->nr_mem_regions].size = GUEST_MAGIC_SIZE;
> +    mem_map->regions[mem_map->nr_mem_regions].type = GUEST_MEM_REGION_MAGIC;
> +    mem_map->nr_mem_regions++;
> +
>      return 0;
>  
>  fail:
> diff --git a/xen/arch/arm/domctl.c b/xen/arch/arm/domctl.c
> index ad56efb0f5..92024bcaa0 100644
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -148,7 +148,6 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>  
>          return 0;
>      }
> -
>      case XEN_DOMCTL_vuart_op:
>      {
>          int rc;
> @@ -176,6 +175,24 @@ long arch_do_domctl(struct xen_domctl *domctl, struct domain *d,
>  
>          return rc;
>      }
> +    case XEN_DOMCTL_get_mem_map:
> +    {
> +        int rc;
Without initialization, what will be the rc value on success?
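I guess this simply wants to be initialised, e.g. (sketch):

    int rc = 0;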

> +        /*
> +         * Cap the number of regions to the minimum value between toolstack and
> +         * hypervisor to avoid overflowing the buffer.
> +         */
> +        uint32_t nr_regions = min(d->arch.mem_map.nr_mem_regions,
> +                                  domctl->u.mem_map.nr_mem_regions);
> +
> +        if ( copy_to_guest(domctl->u.mem_map.buffer,
> +                           d->arch.mem_map.regions,
> +                           nr_regions) ||
> +             __copy_to_guest(u_domctl, domctl, 1) )
In domctl.h, you wrote that nr_regions is IN/OUT but you don't seem to write back
the actual number of regions.
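Something along these lines (untested sketch) before the copy_to_guest() /
__copy_to_guest() pair would do it; whether the capped count or the full
d->arch.mem_map.nr_mem_regions is the right value to report is up to you:

    domctl->u.mem_map.nr_mem_regions = nr_regions;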

> +            rc = -EFAULT;
> +
> +        return rc;
> +    }
>      default:
>          return subarch_do_domctl(domctl, d, u_domctl);
>      }
> diff --git a/xen/arch/arm/include/asm/domain.h b/xen/arch/arm/include/asm/domain.h
> index f1d72c6e48..a559a9e499 100644
> --- a/xen/arch/arm/include/asm/domain.h
> +++ b/xen/arch/arm/include/asm/domain.h
> @@ -10,6 +10,7 @@
>  #include <asm/gic.h>
>  #include <asm/vgic.h>
>  #include <asm/vpl011.h>
> +#include <public/domctl.h>
>  #include <public/hvm/params.h>
>  
>  struct hvm_domain
> @@ -59,6 +60,11 @@ struct paging_domain {
>      unsigned long p2m_total_pages;
>  };
>  
> +struct mem_map_domain {
> +    unsigned int nr_mem_regions;
> +    struct xen_mem_region regions[XEN_MAX_MEM_REGIONS];
> +};
> +
>  struct arch_domain
>  {
>  #ifdef CONFIG_ARM_64
> @@ -77,6 +83,8 @@ struct arch_domain
>  
>      struct paging_domain paging;
>  
> +    struct mem_map_domain mem_map;
> +
>      struct vmmio vmmio;
>  
>      /* Continuable domain_relinquish_resources(). */
> diff --git a/xen/include/public/arch-arm.h b/xen/include/public/arch-arm.h
> index a25e87dbda..a06eaf2dab 100644
> --- a/xen/include/public/arch-arm.h
> +++ b/xen/include/public/arch-arm.h
> @@ -420,6 +420,10 @@ typedef uint64_t xen_callback_t;
>   * should instead use the FDT.
>   */
>  
> +/* Guest memory region types */
> +#define GUEST_MEM_REGION_DEFAULT    0
What's the purpose of this default type? It seems unused.

> +#define GUEST_MEM_REGION_MAGIC      1
> +
>  /* Physical Address Space */
>  
>  /* Virtio MMIO mappings */
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index a33f9ec32b..77bf999651 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -946,6 +946,25 @@ struct xen_domctl_paging_mempool {
>      uint64_aligned_t size; /* Size in bytes. */
>  };
>  
> +#define XEN_MAX_MEM_REGIONS 1
The max number of regions can differ between arches. How are you going to handle it?
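Just to illustrate one option (an untested sketch, not necessarily the right
header split): the arch public headers, which domctl.h already pulls in via
xen.h, could provide the arch-specific value, with domctl.h only supplying a
fallback:

    /* xen/include/public/arch-arm.h (illustrative) */
    #define XEN_MAX_MEM_REGIONS 1

    /* xen/include/public/domctl.h (illustrative) */
    #ifndef XEN_MAX_MEM_REGIONS
    # define XEN_MAX_MEM_REGIONS 1 /* fallback for arches not defining it */
    #endif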

~Michal



 

