
Re: [Minios-devel] [PATCH v3] minios: don't rely on specific page table allocation scheme



Juergen Gross, on Fri 20 Nov 2015 19:32:42 +0100, wrote:
> Today mini-os makes assumptions about how the page tables it is started
> with have been allocated. In particular, it uses the number of page
> table frames to calculate the first unmapped pfn.
> 
> Instead of relying on assumptions about the number of page table
> frames, just look into the page tables to find the first pfn that is
> not already mapped.
> 
> Signed-off-by: Juergen Gross <jgross@xxxxxxxx>

Acked-by: Samuel Thibault <samuel.thibault@xxxxxxxxxxxx>
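
For readers skimming the thread, here is a minimal sketch of the idea in
the commit message (illustrative only, not part of the patch): walk the
page tables the domain was started with and treat the first pfn whose
mapping lacks a present entry at some level as the first unmapped pfn,
instead of deriving it from start_info.nr_pt_frames. The helper name
first_unmapped_pfn() and the separate pass are assumptions made for
illustration; the patch itself folds the _PAGE_PRESENT checks into
build_pagetable()'s existing mapping loop. The helpers used below
(to_virt(), mfn_to_pfn(), pte_to_mfn(), the l[1-4]_table_offset()
macros) are the ones already appearing in arch/x86/mm.c.

/* Illustrative sketch only: return the first pfn of the 1:1 mapped
 * area whose mapping is not yet present at some page table level.
 * 'top' is the top level page table the domain was started with,
 * e.g. (pgentry_t *)start_info.pt_base. */
static unsigned long first_unmapped_pfn(pgentry_t *top, unsigned long max_pfn)
{
    unsigned long pfn;
    pgentry_t *tab, page;

    for ( pfn = 0; pfn < max_pfn; pfn++ )
    {
        unsigned long va = (unsigned long)to_virt(pfn << PAGE_SHIFT);

        tab = top;
#if defined(__x86_64__)
        page = tab[l4_table_offset(va)];
        if ( !(page & _PAGE_PRESENT) )
            break;
        tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);
#endif
        page = tab[l3_table_offset(va)];
        if ( !(page & _PAGE_PRESENT) )
            break;
        tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);

        page = tab[l2_table_offset(va)];
        if ( !(page & _PAGE_PRESENT) )
            break;
        tab = to_virt(mfn_to_pfn(pte_to_mfn(page)) << PAGE_SHIFT);

        if ( !(tab[l1_table_offset(va)] & _PAGE_PRESENT) )
            break;
    }

    return pfn;
}

The patch gets the same information without a separate pass: it starts
the mapping loop at *start_pfn and, at each level, either reuses an
entry that already has _PAGE_PRESENT set or allocates a new page table
frame for it.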

> ---
> Changes in V3:
> - check for every pfn whether it is already mapped (suggested by Samuel Thibault)
> 
> Changes in V2:
> - remove need_pt_frame(), which simplifies the code (suggested by Wei Liu)
> 
> ---
>  arch/x86/mm.c         | 93 ++++++++++++---------------------------------------
>  include/x86/arch_mm.h |  7 ----
>  2 files changed, 21 insertions(+), 79 deletions(-)
> 
> diff --git a/arch/x86/mm.c b/arch/x86/mm.c
> index 9c6d1b8..134a135 100644
> --- a/arch/x86/mm.c
> +++ b/arch/x86/mm.c
> @@ -132,61 +132,6 @@ static void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
>  }
>  
>  /*
> - * Checks if a pagetable frame is needed at 'level' to map a given
> - * address. Note, this function is specific to the initial page table
> - * building.
> - */
> -static int need_pt_frame(unsigned long va, int level)
> -{
> -    unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
> -#if defined(__x86_64__)
> -    unsigned long hyp_virt_end = HYPERVISOR_VIRT_END;
> -#else
> -    unsigned long hyp_virt_end = 0xffffffff;
> -#endif
> -
> -    /* In general frames will _not_ be needed if they were already
> -       allocated to map the hypervisor into our VA space */
> -#if defined(__x86_64__)
> -    if ( level == L3_FRAME )
> -    {
> -        if ( l4_table_offset(va) >= 
> -             l4_table_offset(hyp_virt_start) &&
> -             l4_table_offset(va) <= 
> -             l4_table_offset(hyp_virt_end))
> -            return 0;
> -        return 1;
> -    } 
> -    else
> -#endif
> -
> -    if ( level == L2_FRAME )
> -    {
> -#if defined(__x86_64__)
> -        if ( l4_table_offset(va) >= 
> -             l4_table_offset(hyp_virt_start) &&
> -             l4_table_offset(va) <= 
> -             l4_table_offset(hyp_virt_end))
> -#endif
> -            if ( l3_table_offset(va) >= 
> -                 l3_table_offset(hyp_virt_start) &&
> -                 l3_table_offset(va) <= 
> -                 l3_table_offset(hyp_virt_end))
> -                return 0;
> -
> -        return 1;
> -    } 
> -    else 
> -        /* Always need l1 frames */
> -        if ( level == L1_FRAME )
> -            return 1;
> -
> -    printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n", 
> -           level, hyp_virt_start, hyp_virt_end);
> -    return -1;
> -}
> -
> -/*
>   * Build the initial pagetable.
>   */
>  static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
> @@ -200,8 +145,9 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>      int count = 0;
>      int rc;
>  
> -    pfn_to_map = 
> -        (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
> +    /* Be conservative: even if we know there will be more pages already
> +       mapped, start the loop at the very beginning. */
> +    pfn_to_map = *start_pfn;
>  
>      if ( *max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START) )
>      {
> @@ -229,9 +175,8 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>  #if defined(__x86_64__)
>          offset = l4_table_offset(start_address);
>          /* Need new L3 pt frame */
> -        if ( !(start_address & L3_MASK) )
> -            if ( need_pt_frame(start_address, L3_FRAME) ) 
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
> @@ -239,29 +184,33 @@ static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
>  #endif
>          offset = l3_table_offset(start_address);
>          /* Need new L2 pt frame */
> -        if ( !(start_address & L2_MASK) )
> -            if ( need_pt_frame(start_address, L2_FRAME) )
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
>          tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>          offset = l2_table_offset(start_address);        
>          /* Need new L1 pt frame */
> -        if ( !(start_address & L1_MASK) )
> -            if ( need_pt_frame(start_address, L1_FRAME) )
> -                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +            new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
>  
>          page = tab[offset];
>          pt_mfn = pte_to_mfn(page);
> +        tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
>          offset = l1_table_offset(start_address);
>  
> -        mmu_updates[count].ptr =
> -            ((pgentry_t)pt_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
> -        mmu_updates[count].val = 
> -            (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
> -        count++;
> -        if ( count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn )
> +        if ( !(tab[offset] & _PAGE_PRESENT) )
> +        {
> +            mmu_updates[count].ptr =
> +                ((pgentry_t)pt_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * 
> offset;
> +            mmu_updates[count].val =
> +                (pgentry_t)pfn_to_mfn(pfn_to_map) << PAGE_SHIFT | L1_PROT;
> +            count++;
> +        }
> +        pfn_to_map++;
> +        if ( count == L1_PAGETABLE_ENTRIES ||
> +             (count && pfn_to_map == *max_pfn) )
>          {
>              rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
>              if ( rc < 0 )
> diff --git a/include/x86/arch_mm.h b/include/x86/arch_mm.h
> index 23cfca7..58f29fc 100644
> --- a/include/x86/arch_mm.h
> +++ b/include/x86/arch_mm.h
> @@ -56,12 +56,6 @@
>  
>  #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
>  
> -/*
> - * If starting from virtual address greater than 0xc0000000,
> - * this value will be 2 to account for final mid-level page
> - * directory which is always mapped in at this location.
> - */
> -#define NOT_L1_FRAMES           3
>  #define PRIpte "016llx"
>  #ifndef __ASSEMBLY__
>  typedef uint64_t pgentry_t;
> @@ -87,7 +81,6 @@ typedef uint64_t pgentry_t;
>  #define L2_MASK  ((1UL << L3_PAGETABLE_SHIFT) - 1)
>  #define L3_MASK  ((1UL << L4_PAGETABLE_SHIFT) - 1)
>  
> -#define NOT_L1_FRAMES           3
>  #define PRIpte "016lx"
>  #ifndef __ASSEMBLY__
>  typedef unsigned long pgentry_t;
> -- 
> 2.6.2
> 

-- 
Samuel
<N>  sl  -  display animations aimed to correct users who accidentally enter
<N>        sl instead of ls.
