
Re: [Minios-devel] [PATCH 30/40] arm64: set the mapping for console and xenbus



Hi Shijie,

On 03/11/17 03:12, Huang Shijie wrote:
This patch sets the mapping for console and xenbus:
    .1) Use the page granule (4K) to setup the page table.

This should belong to the patch building the page-table.

    .2) Use the alloc_new_page() to get new page from the memory
        system.

Change-Id: Ic512c89412e40cbff8ebb1635e798357378485a0
Jira: ENTOS-247
Signed-off-by: Huang Shijie <shijie.huang@xxxxxxx>
---
  arch/arm/mm.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-----
  1 file changed, 55 insertions(+), 5 deletions(-)

diff --git a/arch/arm/mm.c b/arch/arm/mm.c
index fb7886c..f85e5a0 100644
--- a/arch/arm/mm.c
+++ b/arch/arm/mm.c
@@ -37,16 +37,43 @@ static inline void set_pgt_entry(lpae_t *ptr, lpae_t val)
      isb();
  }
+static void build_pte(lpae_t *pud, unsigned long vaddr, unsigned long vend,
+                      paddr_t phys, long mem_type)
+{
+    lpae_t *pte;
+
+    pte = (lpae_t *)to_virt((*pud) & ~ATTR_MASK_L) + l3_pgt_idx(vaddr);
+    do {
+        set_pgt_entry(pte, (phys & L3_MASK) | mem_type | L3_PAGE);
+
+        vaddr += L3_SIZE;
+        phys += L3_SIZE;
+        pte++;
+    } while (vaddr < vend);
+}
+
  static void build_pud(lpae_t *pgd, unsigned long vaddr, unsigned long vend,
                        paddr_t phys, long mem_type,
                        paddr_t (*new_page)(void), int level)
  {
      lpae_t *pud;
+    unsigned long next;
pud = (lpae_t *)to_virt((*pgd) & ~ATTR_MASK_L) + l2_pgt_idx(vaddr);
      do {
-        if (level == 2)
+        if (level == 2) {
               set_pgt_entry(pud, (phys & L2_MASK) | mem_type | L2_BLOCK);
+       } else if (level == 3) {
+             next = vaddr + L2_SIZE;
+             if (next > vend)
+                 next = vend;
+
+             if ((*pud) == L2_INVAL)
+                 set_pgt_entry(pud, (new_page()) | PT_PT);
+
+             build_pte(pud, vaddr, next, phys, mem_type);
+        }
+
          vaddr += L2_SIZE;
          phys += L2_SIZE;
          pud++;
@@ -113,11 +140,38 @@ void init_pagetable(unsigned long *start_pfn, unsigned long base_pfn,
      *start_pfn = first_free_pfn;
  }
+
+static paddr_t alloc_new_page(void)
+{
+    unsigned long page;
+
+    page = alloc_page();
+    if (!page)
+        BUG();
+    memset((void *)page, 0, PAGE_SIZE);
+    dsb(ishst);
+    return to_phys(page);
+}
+
+unsigned long map_frame_virt(unsigned long mfn)
+{
+    unsigned long vaddr = (unsigned long)mfn_to_virt(mfn);

If you look at the implementation of to_virt() on Arm64, it can return an address anywhere in the 64-bit address space, but you only support 39 bits of VA.

As you can't assume where the console/xenbus frames sit in the memory layout, there is a chance the virtual addresses will be well above 39 bits, resulting in an access fault.

So you should allocate the virtual address on demand. You can look at what x86 does.

This will also be a benefit when we need to map lists of MFNs contiguously in the VA space.
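
Something along these lines would keep the mapping inside the supported VA range (a rough sketch only; DEMAND_MAP_START, DEMAND_MAP_PAGES and alloc_demand_vaddr() are made-up names for illustration, not the actual x86 helpers):

/*
 * Rough sketch only: reserve a VA window that is guaranteed to fit in
 * the 39-bit address space and hand pages out from it, instead of
 * trusting the address computed by to_virt()/mfn_to_virt().
 */
#define DEMAND_MAP_START   (1UL << 38)          /* well inside the 39-bit VA space */
#define DEMAND_MAP_PAGES   1024UL

static unsigned long demand_map_next = DEMAND_MAP_START;

static unsigned long alloc_demand_vaddr(void)
{
    unsigned long vaddr = demand_map_next;

    if (vaddr >= DEMAND_MAP_START + DEMAND_MAP_PAGES * PAGE_SIZE)
        BUG();                                  /* demand-map area exhausted */

    demand_map_next += PAGE_SIZE;
    return vaddr;
}

unsigned long map_frame_virt(unsigned long mfn)
{
    unsigned long vaddr = alloc_demand_vaddr();

    build_pagetable(vaddr, mfn, 1, BLOCK_DEF_ATTR, alloc_new_page, 3);

    return vaddr;
}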

+
+    build_pagetable(vaddr, mfn, 1, BLOCK_DEF_ATTR, alloc_new_page, 3);

I am a bit surprised that build_pagetable() does not return an error. At the very least, it may not be possible to allocate a page-table.
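
For example (again only a sketch, assuming build_pagetable() is changed to return a non-zero value when a page-table allocation fails):

/*
 * Sketch only: propagate a page-table allocation failure to the caller
 * instead of relying on BUG() inside alloc_new_page().
 */
unsigned long map_frame_virt(unsigned long mfn)
{
    unsigned long vaddr = alloc_demand_vaddr();     /* see the sketch above */

    if (build_pagetable(vaddr, mfn, 1, BLOCK_DEF_ATTR, alloc_new_page, 3))
        return 0;                                   /* caller treats 0 as "mapping failed" */

    return vaddr;
}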

+
+    return vaddr;
+}
+
  #else
  void init_pagetable(unsigned long *start_pfn, unsigned long base_pfn,
                      unsigned long max_pfn)
  {
  }
+
+unsigned long map_frame_virt(unsigned long mfn)
+{
+    return mfn_to_virt(mfn);
+}
  #endif
void arch_init_mm(unsigned long *start_pfn_p, unsigned long *max_pfn_p)
@@ -256,7 +310,3 @@ grant_entry_v1_t *arch_init_gnttab(int nr_grant_frames)
      return to_virt(gnttab_table);
  }
-unsigned long map_frame_virt(unsigned long mfn)
-{
-    return mfn_to_virt(mfn);
-}


Cheers,

--
Julien Grall

_______________________________________________
Minios-devel mailing list
Minios-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/cgi-bin/mailman/listinfo/minios-devel

 

