
[RFC PATCH 2/6] xen/riscv: Add early page table setup code.



This code sets up the initial page tables: it identity-maps Xen's load
address range, maps Xen's link-time address range to where it was actually
loaded, and enables the MMU.
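
For background: with Sv39, a 39-bit virtual address is translated through
three levels of page tables, each level indexed by a 9-bit VPN field above
the 12-bit page offset.  A minimal sketch of the decomposition (this helper
is illustrative only and not part of the patch):

    /*
     * Illustrative only: extract the Sv39 VPN field for a given level.
     * VPN[2] indexes the second (root) table, VPN[1] the first-level
     * table, and VPN[0] the zeroeth-level table.
     */
    static inline unsigned long sv39_vpn(unsigned long va, unsigned int level)
    {
        return (va >> (12 + 9 * level)) & 0x1ff;
    }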

Signed-off-by: Xie Xun <xiexun162534@xxxxxxxxx>
---
 xen/arch/riscv/mm.c | 224 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 224 insertions(+)

diff --git a/xen/arch/riscv/mm.c b/xen/arch/riscv/mm.c
index f366b1ca0c..074a2a4396 100644
--- a/xen/arch/riscv/mm.c
+++ b/xen/arch/riscv/mm.c
@@ -81,6 +81,17 @@ unsigned long xenheap_base_pdx __read_mostly;
 unsigned long frametable_virt_end __read_mostly;
 unsigned long frametable_base_pdx;
 
+/*
+ * xen_second_pagetable is indexed by the VPN[2] field of a virtual address,
+ * xen_first_pagetable by the VPN[1] field, and
+ * xen_zeroeth_pagetable by the VPN[0] field.
+ */
+pte_t xen_second_pagetable[PAGE_ENTRIES] __attribute__((__aligned__(4096)));
+static pte_t xen_first_pagetable[PAGE_ENTRIES]
+    __attribute__((__aligned__(4096)));
+static pte_t xen_zeroeth_pagetable[PAGE_ENTRIES]
+    __attribute__((__aligned__(4096)));
+
 static pte_t xen_fixmap[PAGE_ENTRIES] __attribute__((__aligned__(4096)));
 
 #define THIS_CPU_PGTABLE xen_second_pagetable
@@ -374,6 +385,219 @@ unsigned long get_upper_mfn_bound(void)
     return max_page - 1;
 }
 
+/* Set up megapage (first-level leaf) mappings in a first-level page table. */
+void setup_megapages(pte_t *first_pagetable, unsigned long virtual_start,
+                     unsigned long physical_start, unsigned long page_cnt)
+{
+    unsigned long frame_addr = physical_start;
+    unsigned long end = physical_start + (page_cnt << PAGE_SHIFT);
+    unsigned long vaddr = virtual_start;
+    unsigned long index;
+    pte_t *p;
+
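+    /*
+     * A first-level leaf entry maps a FIRST_SIZE megapage (2 MiB with Sv39),
+     * so the physical start must be megapage-aligned.
+     */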
+    BUG_ON(!IS_ALIGNED(physical_start, FIRST_SIZE));
+
+    while ( frame_addr < end )
+    {
+        index = pagetable_first_index(vaddr);
+        p = &first_pagetable[index];
+        p->pte = paddr_to_megapage_ppn(frame_addr);
+        p->pte |= PTE_LEAF_DEFAULT;
+
+        frame_addr += FIRST_SIZE;
+        vaddr += FIRST_SIZE;
+    }
+}
+
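+/*
+ * Translate an address within Xen's image to its load-time location, so
+ * that the page tables can be written while the MMU is still disabled.
+ */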
+#define resolve_early_addr(x)                                                 \
+    ({                                                                        \
+        unsigned long *__##x;                                                 \
+        if ( load_addr_start <= x && x < load_addr_end )                      \
+            __##x = (unsigned long *)x;                                       \
+        else                                                                  \
+            __##x = (unsigned long *)(x + load_addr_start -                   \
+                                      linker_addr_start);                     \
+        __##x;                                                                \
+    })
+
+void __init clear_pagetables(unsigned long load_addr_start,
+                             unsigned long load_addr_end,
+                             unsigned long linker_addr_start,
+                             unsigned long linker_addr_end)
+{
+    unsigned long *p;
+    unsigned long page;
+    unsigned long i;
+
+    page = (unsigned long)&xen_second_pagetable[0];
+
+    p = resolve_early_addr(page);
+    for ( i = 0; i < ARRAY_SIZE(xen_second_pagetable); i++ )
+    {
+        p[i] = 0ULL;
+    }
+
+    page = (unsigned long)&xen_first_pagetable[0];
+    p = resolve_early_addr(page);
+    for ( i = 0; i < ARRAY_SIZE(xen_first_pagetable); i++ )
+    {
+        p[i] = 0ULL;
+    }
+
+    page = (unsigned long)&xen_zeroeth_pagetable[0];
+    p = resolve_early_addr(page);
+    for ( i = 0; i < ARRAY_SIZE(xen_zeroeth_pagetable); i++ )
+    {
+        p[i] = 0ULL;
+    }
+}
+
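+/*
+ * Establish 4 KiB mappings of [map_start, map_end) to physical addresses
+ * starting at pa_start, using the given three levels of page tables.  Runs
+ * from the .entry section with the MMU disabled.
+ */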
+void __attribute__((section(".entry")))
+setup_initial_pagetables(pte_t *second, pte_t *first, pte_t *zeroeth,
+                         unsigned long map_start, unsigned long map_end,
+                         unsigned long pa_start)
+{
+    unsigned long page_addr;
+    unsigned long index2;
+    unsigned long index1;
+    unsigned long index0;
+
+    /* Align the start addresses down to a 4 KiB page boundary */
+    map_start &= ZEROETH_MAP_MASK;
+    pa_start &= ZEROETH_MAP_MASK;
+
+    page_addr = map_start;
+    while ( page_addr < map_end )
+    {
+        index2 = pagetable_second_index(page_addr);
+        index1 = pagetable_first_index(page_addr);
+        index0 = pagetable_zeroeth_index(page_addr);
+
+        /* Set up the level-2 entry to point at the level-1 table */
+        second[index2] = paddr_to_pte((unsigned long)first);
+        second[index2].pte |= PTE_TABLE;
+
+        /* Set up the level-1 entry to point at the level-0 table */
+        first[index1] = paddr_to_pte((unsigned long)zeroeth);
+        first[index1].pte |= PTE_TABLE;
+
+        /* Set up the level-0 leaf entry, unless one is already present */
+        if ( !pte_is_valid(&zeroeth[index0]) )
+        {
+            zeroeth[index0] = paddr_to_pte((page_addr - map_start) + pa_start);
+            zeroeth[index0].pte |= PTE_LEAF_DEFAULT;
+        }
+
+        /* Point to next page */
+        page_addr += ZEROETH_SIZE;
+    }
+}
+
+/*
+ * WARNING: load_addr() and linker_addr() are to be called only when the MMU
+ * is disabled and only when executed by the primary CPU.  They cannot refer
+ * to any global variables or functions.
+ */
+
+/*
+ * Convert an address laid out at link time to the address where it was
+ * loaded by the bootloader.
+ */
+#define load_addr(linker_address)                                             \
+    ({                                                                        \
+        unsigned long __linker_address = (unsigned long)(linker_address);     \
+        if ( linker_addr_start <= __linker_address &&                         \
+             __linker_address < linker_addr_end )                             \
+        {                                                                     \
+            __linker_address =                                                \
+                __linker_address - linker_addr_start + load_addr_start;       \
+        }                                                                     \
+        __linker_address;                                                     \
+    })
+
+/*
+ * Convert a boot-time Xen address from where it was loaded by the bootloader
+ * to the address it was laid out at link time.
+ */
+#define linker_addr(load_address)                                             \
+    ({                                                                        \
+        unsigned long __load_address = (unsigned long)(load_address);         \
+        if ( load_addr_start <= __load_address &&                             \
+             __load_address < load_addr_end )                                 \
+        {                                                                     \
+            __load_address =                                                  \
+                __load_address - load_addr_start + linker_addr_start;         \
+        }                                                                     \
+        __load_address;                                                       \
+    })
+
+/*
+ * _setup_initial_pagetables:
+ *
+ * 1) Build the page tables for Xen that map the following:
+ *   1.1)  The physical location of Xen (where the bootloader loaded it)
+ *   1.2)  The link-time location of Xen (where the linker expected Xen's
+ *         addresses to be)
+ * 2) Load the root page table into SATP and enable the MMU
+ */
+void __attribute__((section(".entry")))
+_setup_initial_pagetables(unsigned long load_addr_start,
+                          unsigned long load_addr_end,
+                          unsigned long linker_addr_start,
+                          unsigned long linker_addr_end)
+{
+    pte_t *second;
+    pte_t *first;
+    pte_t *zeroeth;
+
+    clear_pagetables(load_addr_start, load_addr_end,
+                     linker_addr_start, linker_addr_end);
+
+    /* Get the addresses where the page tables were loaded */
+    second = (pte_t *)load_addr(&xen_second_pagetable);
+    first = (pte_t *)load_addr(&xen_first_pagetable);
+    zeroeth = (pte_t *)load_addr(&xen_zeroeth_pagetable);
+
+    /*
+     * Create an identity mapping of the load-time address range, i.e. map
+     * it to itself.  This mapping is used at boot time only.
+     */
+    setup_initial_pagetables(second, first, zeroeth, load_addr_start,
+                             load_addr_end, load_addr_start);
+
+    /*
+     * Create a mapping from Xen's link-time addresses to where they were
+     * actually loaded.
+     *
+     * TODO: Protect regions accordingly (e.g., protect text and rodata from
+     * writes).
+     */
+    setup_initial_pagetables(second, first, zeroeth, linker_addr(&_text_start),
+                             linker_addr(&_text_end), load_addr(&_text_start));
+    setup_initial_pagetables(second, first, zeroeth, linker_addr(&_init_start),
+                             linker_addr(&_init_end), load_addr(&_init_start));
+    setup_initial_pagetables(second, first, zeroeth,
+                             linker_addr(&_cpuinit_start),
+                             linker_addr(&_cpuinit_end),
+                             load_addr(&_cpuinit_start));
+    setup_initial_pagetables(second, first, zeroeth,
+                             linker_addr(&_spinlock_start),
+                             linker_addr(&_spinlock_end),
+                             load_addr(&_spinlock_start));
+    setup_initial_pagetables(second, first, zeroeth,
+                             linker_addr(&_rodata_start),
+                             linker_addr(&_rodata_end),
+                             load_addr(&_rodata_start));
+    setup_initial_pagetables(second, first, zeroeth, linker_addr_start,
+                             linker_addr_end, load_addr_start);
+
+    /* Ensure page table writes precede loading the SATP */
+    asm volatile("sfence.vma");
+
+    /* Enable the MMU and load the new pagetable for Xen */
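+    /*
+     * SATP holds the PPN of the root page table in its low bits and the
+     * address-translation mode (Sv39 here) in the MODE field.
+     */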
+    csr_write(CSR_SATP,
+              (load_addr(xen_second_pagetable) >> PAGE_SHIFT) |
+              (SATP_MODE_SV39 << SATP_MODE_SHIFT));
+
+    phys_offset = load_addr_start - linker_addr_start;
+}
+
 /*
  * Map the table that pte points to.
  */
-- 
2.30.2