
Re: [PATCH v2 6/6] tools/arm: exclude iomem from domU extended regions



On Thu, 8 May 2025, Stewart Hildebrand wrote:
> When a device is passed through to an xl domU, its iomem ranges may
> overlap with the extended regions. Remove the iomem ranges from the
> extended regions.
> 
> Signed-off-by: Stewart Hildebrand <stewart.hildebrand@xxxxxxx>

Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
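
One note for readers following the diff below: the new logic is a
classic subtraction of sorted intervals from a free window. A minimal
standalone sketch of the idea (hypothetical names and example values,
not the libxl code itself):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN_SIZE 0x4000000ULL /* 64MB, mirrors EXT_REGION_MIN_SIZE */

struct range {
    uint64_t start;
    uint64_t end; /* inclusive */
};

/*
 * Carve the reserved ranges out of [win.start, win.end] and print
 * each surviving gap of at least MIN_SIZE bytes. resv[] must be
 * sorted by start address, as the patch guarantees with qsort().
 */
static void carve(struct range win, const struct range *resv,
                  unsigned int n)
{
    unsigned int i;

    for (i = 0; i < n; i++) {
        if (resv[i].end < win.start || resv[i].start > win.end)
            continue; /* no overlap with the window */

        if (resv[i].start > win.start &&
            resv[i].start - win.start >= MIN_SIZE)
            printf("gap: %#"PRIx64"-%#"PRIx64"\n",
                   win.start, resv[i].start - 1);

        if (resv[i].end >= win.end)
            return; /* the reserved range covers the tail */

        win.start = resv[i].end + 1; /* skip past the hole */
    }

    if (win.end - win.start + 1 >= MIN_SIZE)
        printf("gap: %#"PRIx64"-%#"PRIx64"\n", win.start, win.end);
}

int main(void)
{
    /* A 1GB window with a 16MB reserved hole in the middle. */
    struct range win = { 0x40000000ULL, 0x7fffffffULL };
    struct range resv[] = { { 0x50000000ULL, 0x50ffffffULL } };

    carve(win, resv, 1);
    return 0;
}

Run as-is this prints 0x40000000-0x4fffffff and 0x51000000-0x7fffffff.
The patch does the same carving once per RAM bank, recording the
results in region_base[]/region_size[] instead of printing them and
capping the count at MAX_NR_EXT_REGIONS.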


> ---
> Not sure if we need a Fixes: tag, but if we do:
> Fixes: 57f87857dc2d ("libxl/arm: Add handling of extended regions for DomU")
> 
> v1->v2:
> * no change
> ---
>  tools/libs/light/libxl_arm.c | 118 +++++++++++++++++++++++++++++------
>  1 file changed, 99 insertions(+), 19 deletions(-)
> 
> diff --git a/tools/libs/light/libxl_arm.c b/tools/libs/light/libxl_arm.c
> index 75c811053c7c..8ae16a1726fc 100644
> --- a/tools/libs/light/libxl_arm.c
> +++ b/tools/libs/light/libxl_arm.c
> @@ -798,6 +798,8 @@ static int make_timer_node(libxl__gc *gc, void *fdt,
>      return 0;
>  }
>  
> +#define MAX_NR_EXT_REGIONS   256
> +
>  static int make_hypervisor_node(libxl__gc *gc, void *fdt,
>                                  const libxl_version_info *vers)
>  {
> @@ -821,7 +823,7 @@ static int make_hypervisor_node(libxl__gc *gc, void *fdt,
>       */
>      res = fdt_property_reg_placeholder(gc, fdt, GUEST_ROOT_ADDRESS_CELLS,
>                                         GUEST_ROOT_SIZE_CELLS,
> -                                       GUEST_RAM_BANKS + 1);
> +                                       MAX_NR_EXT_REGIONS + 1);
>      if (res) return res;
>  
>      /*
> @@ -1517,17 +1519,29 @@ static void finalise_one_node(libxl__gc *gc, void *fdt, const char *uname,
>  
>  #define EXT_REGION_MIN_SIZE   xen_mk_ullong(0x0004000000) /* 64MB */
>  
> -static int finalize_hypervisor_node(libxl__gc *gc, struct xc_dom_image *dom)
> +static int compare_iomem(const void *a, const void *b)
> +{
> +    const libxl_iomem_range *x = a, *y = b;
> +
> +    if (x->gfn < y->gfn)
> +        return -1;
> +    if (x->gfn > y->gfn)
> +        return 1;
> +    return 0;
> +}
> +
> +static int finalize_hypervisor_node(libxl__gc *gc,
> +                                    libxl_domain_build_info *b_info,
> +                                    struct xc_dom_image *dom)
>  {
>      void *fdt = dom->devicetree_blob;
> -    uint64_t region_size[GUEST_RAM_BANKS] = {0}, region_base[GUEST_RAM_BANKS],
> -        bankend[GUEST_RAM_BANKS];
> +    uint64_t region_base[MAX_NR_EXT_REGIONS], region_size[MAX_NR_EXT_REGIONS];
>      uint32_t regs[(GUEST_ROOT_ADDRESS_CELLS + GUEST_ROOT_SIZE_CELLS) *
> -                  (GUEST_RAM_BANKS + 1)];
> +                  (MAX_NR_EXT_REGIONS + 1)];
>      be32 *cells = &regs[0];
>      const uint64_t bankbase[] = GUEST_RAM_BANK_BASES;
>      const uint64_t banksize[] = GUEST_RAM_BANK_SIZES;
> -    unsigned int i, len, nr_regions = 0;
> +    unsigned int i, j, len, nr_regions = 0;
>      libxl_dominfo info;
>      int offset, rc;
>  
> @@ -1542,20 +1556,90 @@ static int finalize_hypervisor_node(libxl__gc *gc, struct xc_dom_image *dom)
>      if (info.gpaddr_bits > 64)
>          return ERROR_INVAL;
>  
> +    qsort(b_info->iomem, b_info->num_iomem, sizeof(libxl_iomem_range),
> +          compare_iomem);
> +
>      /*
>       * Try to allocate separate 2MB-aligned extended regions from the first
>       * and second RAM banks taking into the account the maximum supported
>       * guest physical address space size and the amount of memory assigned
>       * to the guest.
>       */
> -    for (i = 0; i < GUEST_RAM_BANKS; i++) {
> -        region_base[i] = bankbase[i] +
> +    for (i = 0; i < GUEST_RAM_BANKS && nr_regions < MAX_NR_EXT_REGIONS; i++) {
> +        struct {
> +            uint64_t start;
> +            uint64_t end; /* inclusive */
> +        } unallocated;
> +        uint64_t size = 0;
> +
> +        unallocated.start = bankbase[i] +
>              ALIGN_UP_TO_2MB((uint64_t)dom->rambank_size[i] << XC_PAGE_SHIFT);
>  
> -        bankend[i] = ~0ULL >> (64 - info.gpaddr_bits);
> -        bankend[i] = min(bankend[i], bankbase[i] + banksize[i] - 1);
> -        if (bankend[i] > region_base[i])
> -            region_size[i] = bankend[i] - region_base[i] + 1;
> +        unallocated.end = ~0ULL >> (64 - info.gpaddr_bits);
> +        unallocated.end = min(unallocated.end, bankbase[i] + banksize[i] - 1);
> +
> +        if (unallocated.end > unallocated.start)
> +            size = unallocated.end - unallocated.start + 1;
> +
> +        if (size < EXT_REGION_MIN_SIZE)
> +            continue;
> +
> +        /* Exclude iomem */
> +        for (j = 0; j < b_info->num_iomem && nr_regions < MAX_NR_EXT_REGIONS;
> +             j++) {
> +            struct {
> +                uint64_t start;
> +                uint64_t end; /* inclusive */
> +            } iomem;
> +
> +            iomem.start = b_info->iomem[j].gfn << XC_PAGE_SHIFT;
> +            iomem.end = ((b_info->iomem[j].gfn + b_info->iomem[j].number)
> +                         << XC_PAGE_SHIFT) - 1;
> +
> +            if (iomem.end >= unallocated.start
> +                && iomem.start <= unallocated.end) {
> +
> +                if (iomem.start <= unallocated.start) {
> +                    unallocated.start = iomem.end + 1;
> +
> +                    if (iomem.end >= unallocated.end)
> +                        /* Complete overlap, discard unallocated region */
> +                        break;
> +
> +                    /* Beginning overlap */
> +                    continue;
> +                }
> +
> +                if (iomem.start > unallocated.start) {
> +                    assert(unallocated.end > unallocated.start);
> +                    size = iomem.start - unallocated.start;
> +
> +                    if (size >= EXT_REGION_MIN_SIZE) {
> +                        region_base[nr_regions] = unallocated.start;
> +                        region_size[nr_regions] = size;
> +                        nr_regions++;
> +                    }
> +
> +                    unallocated.start = iomem.end + 1;
> +
> +                    if (iomem.end >= unallocated.end)
> +                        /* End overlap, discard remaining unallocated region */
> +                        break;
> +                }
> +            }
> +        }
> +
> +        if (unallocated.end > unallocated.start
> +            && nr_regions < MAX_NR_EXT_REGIONS)
> +        {
> +            size = unallocated.end - unallocated.start + 1;
> +
> +            if (size >= EXT_REGION_MIN_SIZE) {
> +                region_base[nr_regions] = unallocated.start;
> +                region_size[nr_regions] = size;
> +                nr_regions++;
> +            }
> +        }
>      }
>  
>      /*
> @@ -1565,16 +1649,12 @@ static int finalize_hypervisor_node(libxl__gc *gc, struct xc_dom_image *dom)
>      set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
>                GUEST_GNTTAB_BASE, GUEST_GNTTAB_SIZE);
>  
> -    for (i = 0; i < GUEST_RAM_BANKS; i++) {
> -        if (region_size[i] < EXT_REGION_MIN_SIZE)
> -            continue;
> -
> +    for (i = 0; i < nr_regions; i++) {
>          LOG(DEBUG, "Extended region %u: %#"PRIx64"->%#"PRIx64"",
> -            nr_regions, region_base[i], region_base[i] + region_size[i]);
> +            i, region_base[i], region_base[i] + region_size[i]);
>  
>          set_range(&cells, GUEST_ROOT_ADDRESS_CELLS, GUEST_ROOT_SIZE_CELLS,
>                    region_base[i], region_size[i]);
> -        nr_regions++;
>      }
>  
>      if (!nr_regions)
> @@ -1626,7 +1706,7 @@ int libxl__arch_domain_finalise_hw_description(libxl__gc *gc,
>  
>      }
>  
> -    res = finalize_hypervisor_node(gc, dom);
> +    res = finalize_hypervisor_node(gc, &d_config->b_info, dom);
>      if (res)
>          return res;
>  
> -- 
> 2.49.0
> 
> 
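
A short aside on the gpaddr_bits handling in the hunk above, since the
shift is easy to misread: ~0ULL >> (64 - info.gpaddr_bits) is the last
addressable guest physical address, and the free window ends at
whichever is smaller, that address or the last byte of the RAM bank. A
hypothetical helper (window_end is my name, not libxl's) showing the
same clamp; the earlier gpaddr_bits > 64 check is what keeps the shift
amount defined:

#include <stdint.h>

static uint64_t window_end(unsigned int gpaddr_bits, uint64_t bankbase,
                           uint64_t banksize)
{
    /* e.g. gpaddr_bits == 40 gives max_gpa == 0xffffffffff */
    uint64_t max_gpa = ~0ULL >> (64 - gpaddr_bits);
    uint64_t bankend = bankbase + banksize - 1;

    return max_gpa < bankend ? max_gpa : bankend;
}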