[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 5/6] xen/arm: map reserved-memory regions as normal memory in dom0



Hi Stefano,

On 26/02/2019 23:07, Stefano Stabellini wrote:
> reserved-memory regions should be mapped as normal memory. At the
> moment, they get remapped as device memory in dom0 because Xen doesn't
> know any better. Add an explicit check for it.

You are probably working from an outdated tree (> 2 years old). In recent Xen, 
Dom0 MMIO regions are mapped using p2m_mmio_direct_c. The main difference with 
p2m_ram_rw is the shareability attribute (inner vs outer).

This will also have the advantage of not interfering with the rest of Xen.

But I don't think this would be enough. Per [1], reserved-memory region 
is used to carve memory from /memory node. So those regions should be 
described in /memory of the Dom0 DT as well.

> 
> However, reserved-memory regions are allowed to overlap partially or
> completely with memory nodes. In these cases, the overlapping memory is
> reserved-memory and should be handled accordingly.

Do you mind providing your source? If you look at the description in the 
Linux bindings, it is clear that they will always overlap with /memory.

[...]

> diff --git a/xen/arch/arm/setup.c b/xen/arch/arm/setup.c
> index 80f0028..74c4707 100644
> --- a/xen/arch/arm/setup.c
> +++ b/xen/arch/arm/setup.c
> @@ -470,10 +470,52 @@ static void __init init_pdx(void)
>       }
>   }
>   
> +static void __init check_reserved_memory(paddr_t *bank_start, paddr_t 
> *bank_size)
> +{
> +    paddr_t bank_end = *bank_start + *bank_size;
> +    struct meminfo mem = bootinfo.mem;
> +    int i;
> +
> +    for ( i = 0; i < bootinfo.reserved_mem.nr_banks; i++ )
> +    {
> +        struct membank rbank = bootinfo.reserved_mem.bank[i];
> +
> +        if ( *bank_start < rbank.start && bank_end <= rbank.start )
> +            continue;
> +
> +        if ( *bank_start >= (rbank.start + rbank.size) )
> +            continue;
> +
> +        /* memory bank overlaps with reserved memory region */
> +        if ( rbank.start > *bank_start )
> +        {
> +            bank_end = rbank.start;
> +            if ( *bank_start + *bank_size > rbank.start + rbank.size )
> +            {
> +                mem.bank[mem.nr_banks].start = rbank.start + rbank.size;
> +                mem.bank[mem.nr_banks].size = *bank_start + *bank_size -
> +                    mem.bank[mem.nr_banks].start;
> +                mem.nr_banks++;
> +            }
> +        }
> +        else if ( rbank.start + rbank.size > *bank_start)
> +        {
> +           if (rbank.start + rbank.size < bank_end )
> +               *bank_start = rbank.start + rbank.size;
> +           else
> +               *bank_start = bank_end;
> +        }
> +
> +        *bank_size = bank_end - *bank_start;
> +    }
> +}

reserved-memory nodes are nothing more than an extension of an old 
DT binding for reserved memory. We handle them in a few places (see 
consider_modules and dt_unreserved_region). So most likely you want to 
extend what we already have.

This would avoid most (if not all) of the changes below.

> +
>   #ifdef CONFIG_ARM_32
>   static void __init setup_mm(unsigned long dtb_paddr, size_t dtb_size)
>   {
> -    paddr_t ram_start, ram_end, ram_size;
> +    paddr_t ram_start = ~0;
> +    paddr_t ram_end = 0;
> +    paddr_t ram_size = 0;
>       paddr_t s, e;
>       unsigned long ram_pages;
>       unsigned long heap_pages, xenheap_pages, domheap_pages;
> @@ -487,18 +529,19 @@ static void __init setup_mm(unsigned long dtb_paddr, 
> size_t dtb_size)
>   
>       init_pdx();
>   
> -    ram_start = bootinfo.mem.bank[0].start;
> -    ram_size  = bootinfo.mem.bank[0].size;
> -    ram_end   = ram_start + ram_size;
> -
> -    for ( i = 1; i < bootinfo.mem.nr_banks; i++ )
> +    for ( i = 0; i < bootinfo.mem.nr_banks; i++ )
>       {
> -        paddr_t bank_start = bootinfo.mem.bank[i].start;
> -        paddr_t bank_size = bootinfo.mem.bank[i].size;
> -        paddr_t bank_end = bank_start + bank_size;
> +        paddr_t bank_end;
>   
> -        ram_size  = ram_size + bank_size;
> -        ram_start = min(ram_start,bank_start);
> +        check_reserved_memory(&bootinfo.mem.bank[i].start,
> +                              &bootinfo.mem.bank[i].size);
> +
> +        if ( !bootinfo.mem.bank[i].size )
> +            continue;
> +
> +        bank_end = bootinfo.mem.bank[i].start + bootinfo.mem.bank[i].size;
> +        ram_size  = ram_size + bootinfo.mem.bank[i].size;
> +        ram_start = min(ram_start, bootinfo.mem.bank[i].start);
>           ram_end   = max(ram_end,bank_end);
>       }
>   
> @@ -570,6 +613,9 @@ static void __init setup_mm(unsigned long dtb_paddr, 
> size_t dtb_size)
>           paddr_t bank_start = bootinfo.mem.bank[i].start;
>           paddr_t bank_end = bank_start + bootinfo.mem.bank[i].size;
>   
> +        if ( !bootinfo.mem.bank[i].size )
> +            continue;
> +
>           s = bank_start;
>           while ( s < bank_end )
>           {
> @@ -627,11 +673,21 @@ static void __init setup_mm(unsigned long dtb_paddr, 
> size_t dtb_size)
>       total_pages = 0;
>       for ( bank = 0 ; bank < bootinfo.mem.nr_banks; bank++ )
>       {
> -        paddr_t bank_start = bootinfo.mem.bank[bank].start;
> -        paddr_t bank_size = bootinfo.mem.bank[bank].size;
> -        paddr_t bank_end = bank_start + bank_size;
> +        paddr_t bank_start;
> +        paddr_t bank_size;
> +        paddr_t bank_end;
>           paddr_t s, e;
>   
> +        check_reserved_memory(&bootinfo.mem.bank[bank].start,
> +                              &bootinfo.mem.bank[bank].size);
> +
> +        bank_start = bootinfo.mem.bank[bank].start;
> +        bank_size = bootinfo.mem.bank[bank].size;
> +        bank_end = bank_start + bank_size;
> +
> +        if ( !bank_size )
> +            continue;
> +
>           ram_size = ram_size + bank_size;
>           ram_start = min(ram_start,bank_start);
>           ram_end = max(ram_end,bank_end);
> 

[1] 
https://www.kernel.org/doc/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt

-- 
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.