
[Xen-devel] [PATCH 3/3] minios: general cleanup of mm.c



Major cleanup of mm.c: added comments, fixed coding style, introduced more sensible variable names, marked some local functions static, etc.

Signed-off-by: Rolf Neugebauer <rolf.neugebauer@xxxxxxxxxxxxx>
---
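For context, the central idiom this cleanup applies throughout mm.c is
capturing the return code of hypercalls in a local rc and reporting it
before exiting, with conditionals in the Xen "if ( ... )" style. A
minimal sketch, assuming the usual mini-os environment; the helper name
apply_one_update is illustrative only and not part of the patch:

    /*
     * Illustrative helper (hypothetical): apply a single MMU update the
     * way the rewritten mm.c does, capturing and reporting the rc from
     * HYPERVISOR_mmu_update() instead of discarding it.
     */
    static void apply_one_update(mmu_update_t *update)
    {
        int rc;

        if ( (rc = HYPERVISOR_mmu_update(update, 1, NULL, DOMID_SELF)) < 0 )
        {
            printk("ERROR: mmu_update failed with rc=%d\n", rc);
            do_exit();
        }
    }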
diff -r 21b2f5f70aaa extras/mini-os/arch/x86/mm.c
--- a/extras/mini-os/arch/x86/mm.c      Thu Feb 19 16:41:22 2009 +0000
+++ b/extras/mini-os/arch/x86/mm.c      Thu Feb 19 17:23:16 2009 +0000
@@ -52,20 +52,26 @@
 unsigned long *phys_to_machine_mapping;
 unsigned long mfn_zero;
 extern char stack[];
-extern void page_walk(unsigned long virt_addr);
+extern void page_walk(unsigned long va);

-void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
-                                unsigned long offset, unsigned long level)
+/*
+ * Make pt_pfn a new 'level' page table frame and hook it into the page
+ * table at offset in previous level MFN (prev_l_mfn). pt_pfn is a guest
+ * PFN.
+ */
+static void new_pt_frame(unsigned long *pt_pfn, unsigned long prev_l_mfn,
+                         unsigned long offset, unsigned long level)
 {
     pgentry_t *tab = (pgentry_t *)start_info.pt_base;
     unsigned long pt_page = (unsigned long)pfn_to_virt(*pt_pfn);
     pgentry_t prot_e, prot_t;
     mmu_update_t mmu_updates[1];
+    int rc;

     prot_e = prot_t = 0;
-    DEBUG("Allocating new L%d pt frame for pt_pfn=%lx, "
-           "prev_l_mfn=%lx, offset=%lx",
-           level, *pt_pfn, prev_l_mfn, offset);
+    DEBUG("Allocating new L%d pt frame for pfn=%lx, "
+          "prev_l_mfn=%lx, offset=%lx",
+          level, *pt_pfn, prev_l_mfn, offset);

     /* We need to clear the page, otherwise we might fail to map it
        as a page table page */
@@ -74,56 +80,63 @@
     switch ( level )
     {
     case L1_FRAME:
-         prot_e = L1_PROT;
-         prot_t = L2_PROT;
-         break;
+        prot_e = L1_PROT;
+        prot_t = L2_PROT;
+        break;
     case L2_FRAME:
-         prot_e = L2_PROT;
-         prot_t = L3_PROT;
-         break;
+        prot_e = L2_PROT;
+        prot_t = L3_PROT;
+        break;
 #if defined(__x86_64__)
     case L3_FRAME:
-         prot_e = L3_PROT;
-         prot_t = L4_PROT;
-         break;
+        prot_e = L3_PROT;
+        prot_t = L4_PROT;
+        break;
 #endif
     default:
- printk("new_pt_frame() called with invalid level number %d\n", level);
-         do_exit();
-         break;
+ printk("new_pt_frame() called with invalid level number %d\n", level);
+        do_exit();
+        break;
     }

-    /* Update the entry */
+    /* Make PFN a page table page */
 #if defined(__x86_64__)
     tab = pte_to_virt(tab[l4_table_offset(pt_page)]);
 #endif
     tab = pte_to_virt(tab[l3_table_offset(pt_page)]);

     mmu_updates[0].ptr = (tab[l2_table_offset(pt_page)] & PAGE_MASK) +
-                         sizeof(pgentry_t) * l1_table_offset(pt_page);
+        sizeof(pgentry_t) * l1_table_offset(pt_page);
     mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT |
-                         (prot_e & ~_PAGE_RW);
-    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
+        (prot_e & ~_PAGE_RW);
+
+    if ( (rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF)) < 0 )
     {
-         printk("PTE for new page table page could not be updated\n");
-         do_exit();
+ printk("ERROR: PTE for new page table page could not be updated\n");
+        printk("       mmu_update failed with rc=%d\n", rc);
+        do_exit();
     }
-
-    /* Now fill the new page table page with entries.
-       Update the page directory as well. */
-    mmu_updates[0].ptr = ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
+
+    /* Hook the new page table page into the hierarchy */
+    mmu_updates[0].ptr =
+        ((pgentry_t)prev_l_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
     mmu_updates[0].val = (pgentry_t)pfn_to_mfn(*pt_pfn) << PAGE_SHIFT | prot_t;
-    if(HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF) < 0)
+
+    if ( (rc = HYPERVISOR_mmu_update(mmu_updates, 1, NULL, DOMID_SELF)) < 0 )
     {
-       printk("ERROR: mmu_update failed\n");
-       do_exit();
+        printk("ERROR: mmu_update failed with rc=%d\n", rc);
+        do_exit();
     }

     *pt_pfn += 1;
 }

-/* Checks if a pagetable frame is needed (if weren't allocated by Xen) */
-static int need_pt_frame(unsigned long virt_address, int level)
+/*
+ * Checks if a pagetable frame is needed at 'level' to map a given
+ * address. Note that this function is specific to the initial page table
+ * building.
+ */
+static int need_pt_frame(unsigned long va, int level)
 {
     unsigned long hyp_virt_start = HYPERVISOR_VIRT_START;
 #if defined(__x86_64__)
@@ -135,63 +148,71 @@
     /* In general frames will _not_ be needed if they were already
        allocated to map the hypervisor into our VA space */
 #if defined(__x86_64__)
-    if(level == L3_FRAME)
+    if ( level == L3_FRAME )
     {
-        if(l4_table_offset(virt_address) >=
-           l4_table_offset(hyp_virt_start) &&
-           l4_table_offset(virt_address) <=
-           l4_table_offset(hyp_virt_end))
+        if ( l4_table_offset(va) >=
+             l4_table_offset(hyp_virt_start) &&
+             l4_table_offset(va) <=
+             l4_table_offset(hyp_virt_end))
             return 0;
         return 1;
-    } else
+    }
+    else
 #endif

-    if(level == L2_FRAME)
+    if ( level == L2_FRAME )
     {
 #if defined(__x86_64__)
-        if(l4_table_offset(virt_address) >=
-           l4_table_offset(hyp_virt_start) &&
-           l4_table_offset(virt_address) <=
-           l4_table_offset(hyp_virt_end))
+        if ( l4_table_offset(va) >=
+             l4_table_offset(hyp_virt_start) &&
+             l4_table_offset(va) <=
+             l4_table_offset(hyp_virt_end))
 #endif
-            if(l3_table_offset(virt_address) >=
-               l3_table_offset(hyp_virt_start) &&
-               l3_table_offset(virt_address) <=
-               l3_table_offset(hyp_virt_end))
+            if ( l3_table_offset(va) >=
+                 l3_table_offset(hyp_virt_start) &&
+                 l3_table_offset(va) <=
+                 l3_table_offset(hyp_virt_end))
                 return 0;

         return 1;
-    } else
-
-    /* Always need l1 frames */
-    if(level == L1_FRAME)
-        return 1;
+    }
+    else
+        /* Always need l1 frames */
+        if ( level == L1_FRAME )
+            return 1;

     printk("ERROR: Unknown frame level %d, hypervisor %llx,%llx\n",
-        level, hyp_virt_start, hyp_virt_end);
+           level, hyp_virt_start, hyp_virt_end);
     return -1;
 }

-void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
+/*
+ * Build the initial pagetable.
+ */
+static void build_pagetable(unsigned long *start_pfn, unsigned long *max_pfn)
 {
     unsigned long start_address, end_address;
     unsigned long pfn_to_map, pt_pfn = *start_pfn;
     static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
     pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
-    unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+    unsigned long pt_mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
     unsigned long offset;
     int count = 0;
+    int rc;

-    pfn_to_map = (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;
+    pfn_to_map =
+        (start_info.nr_pt_frames - NOT_L1_FRAMES) * L1_PAGETABLE_ENTRIES;

-    if (*max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START))
+    if ( *max_pfn >= virt_to_pfn(HYPERVISOR_VIRT_START) )
     {
         printk("WARNING: Mini-OS trying to use Xen virtual space. "
                "Truncating memory from %dMB to ",
-               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
+               ((unsigned long)pfn_to_virt(*max_pfn) -
+                (unsigned long)&_text)>>20);
         *max_pfn = virt_to_pfn(HYPERVISOR_VIRT_START - PAGE_SIZE);
         printk("%dMB\n",
-               ((unsigned long)pfn_to_virt(*max_pfn) - (unsigned long)&_text)>>20);
+               ((unsigned long)pfn_to_virt(*max_pfn) -
+                (unsigned long)&_text)>>20);
     }

     start_address = (unsigned long)pfn_to_virt(pfn_to_map);
@@ -200,49 +221,53 @@
     /* We worked out the virtual memory range to map, now mapping loop */
printk("Mapping memory range 0x%lx - 0x%lx\n", start_address, end_address);

-    while(start_address < end_address)
+    while ( start_address < end_address )
     {
         tab = (pgentry_t *)start_info.pt_base;
-        mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
+        pt_mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

 #if defined(__x86_64__)
         offset = l4_table_offset(start_address);
         /* Need new L3 pt frame */
-        if(!(start_address & L3_MASK))
-            if(need_pt_frame(start_address, L3_FRAME))
-                new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
+        if ( !(start_address & L3_MASK) )
+            if ( need_pt_frame(start_address, L3_FRAME) )
+                new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);

         page = tab[offset];
-        mfn = pte_to_mfn(page);
-        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+        pt_mfn = pte_to_mfn(page);
+        tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
 #endif
         offset = l3_table_offset(start_address);
         /* Need new L2 pt frame */
-        if(!(start_address & L2_MASK))
-            if(need_pt_frame(start_address, L2_FRAME))
-                new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+        if ( !(start_address & L2_MASK) )
+            if ( need_pt_frame(start_address, L2_FRAME) )
+                new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);

         page = tab[offset];
-        mfn = pte_to_mfn(page);
-        tab = to_virt(mfn_to_pfn(mfn) << PAGE_SHIFT);
+        pt_mfn = pte_to_mfn(page);
+        tab = to_virt(mfn_to_pfn(pt_mfn) << PAGE_SHIFT);
         offset = l2_table_offset(start_address);
         /* Need new L1 pt frame */
-        if(!(start_address & L1_MASK))
-            if(need_pt_frame(start_address, L1_FRAME))
-                new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+        if ( !(start_address & L1_MASK) )
+            if ( need_pt_frame(start_address, L1_FRAME) )
+                new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);

         page = tab[offset];
-        mfn = pte_to_mfn(page);
+        pt_mfn = pte_to_mfn(page);
         offset = l1_table_offset(start_address);

-        mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
-        mmu_updates[count].val = (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
+        mmu_updates[count].ptr =
+            ((pgentry_t)pt_mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
+        mmu_updates[count].val =
+            (pgentry_t)pfn_to_mfn(pfn_to_map++) << PAGE_SHIFT | L1_PROT;
         count++;
-        if (count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn)
+        if ( count == L1_PAGETABLE_ENTRIES || pfn_to_map == *max_pfn )
         {
-            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
+            rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
+            if ( rc < 0 )
             {
-                printk("PTE could not be updated\n");
+ printk("ERROR: build_pagetable(): PTE could not be updated\n");
+                printk("       mmu_update failed with rc=%d\n", rc);
                 do_exit();
             }
             count = 0;
@@ -253,20 +278,26 @@
     *start_pfn = pt_pfn;
 }

+/*
+ * Mark a portion of the address space read-only.
+ */
 extern void shared_info;
 static void set_readonly(void *text, void *etext)
 {
-    unsigned long start_address = ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
+    unsigned long start_address =
+        ((unsigned long) text + PAGE_SIZE - 1) & PAGE_MASK;
     unsigned long end_address = (unsigned long) etext;
     static mmu_update_t mmu_updates[L1_PAGETABLE_ENTRIES + 1];
     pgentry_t *tab = (pgentry_t *)start_info.pt_base, page;
     unsigned long mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));
     unsigned long offset;
     int count = 0;
+    int rc;

     printk("setting %p-%p readonly\n", text, etext);

-    while (start_address + PAGE_SIZE <= end_address) {
+    while ( start_address + PAGE_SIZE <= end_address )
+    {
         tab = (pgentry_t *)start_info.pt_base;
         mfn = pfn_to_mfn(virt_to_pfn(start_info.pt_base));

@@ -287,20 +318,25 @@

         offset = l1_table_offset(start_address);

-       if (start_address != (unsigned long)&shared_info) {
-           mmu_updates[count].ptr = ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
-           mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
-           count++;
-       } else
-           printk("skipped %p\n", start_address);
+        if ( start_address != (unsigned long)&shared_info )
+        {
+            mmu_updates[count].ptr =
+                ((pgentry_t)mfn << PAGE_SHIFT) + sizeof(pgentry_t) * offset;
+            mmu_updates[count].val = tab[offset] & ~_PAGE_RW;
+            count++;
+        }
+        else
+            printk("skipped %p\n", start_address);

         start_address += PAGE_SIZE;

-        if (count == L1_PAGETABLE_ENTRIES || start_address + PAGE_SIZE > end_address)
+        if ( count == L1_PAGETABLE_ENTRIES ||
+             start_address + PAGE_SIZE > end_address )
         {
-            if(HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF) < 0)
+            rc = HYPERVISOR_mmu_update(mmu_updates, count, NULL, DOMID_SELF);
+            if ( rc < 0 )
             {
-                printk("PTE could not be updated\n");
+ printk("ERROR: set_readonly(): PTE could not be updated\n");
                 do_exit();
             }
             count = 0;
@@ -308,11 +344,11 @@
     }

     {
-       mmuext_op_t op = {
-           .cmd = MMUEXT_TLB_FLUSH_ALL,
-       };
-       int count;
-       HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
+        mmuext_op_t op = {
+            .cmd = MMUEXT_TLB_FLUSH_ALL,
+        };
+        int count;
+        HYPERVISOR_mmuext_op(&op, 1, &count, DOMID_SELF);
     }
 }

@@ -371,7 +407,10 @@
 }


-static pgentry_t *get_pgt(unsigned long addr)
+/*
+ * Get the PTE for virtual address va if it exists, otherwise NULL.
+ */
+static pgentry_t *get_pgt(unsigned long va)
 {
     unsigned long mfn;
     pgentry_t *tab;
@@ -381,67 +420,78 @@
     mfn = virt_to_mfn(start_info.pt_base);

 #if defined(__x86_64__)
-    offset = l4_table_offset(addr);
-    if (!(tab[offset] & _PAGE_PRESENT))
+    offset = l4_table_offset(va);
+    if ( !(tab[offset] & _PAGE_PRESENT) )
         return NULL;
     mfn = pte_to_mfn(tab[offset]);
     tab = mfn_to_virt(mfn);
 #endif
-    offset = l3_table_offset(addr);
-    if (!(tab[offset] & _PAGE_PRESENT))
+    offset = l3_table_offset(va);
+    if ( !(tab[offset] & _PAGE_PRESENT) )
         return NULL;
     mfn = pte_to_mfn(tab[offset]);
     tab = mfn_to_virt(mfn);
-    offset = l2_table_offset(addr);
-    if (!(tab[offset] & _PAGE_PRESENT))
+    offset = l2_table_offset(va);
+    if ( !(tab[offset] & _PAGE_PRESENT) )
         return NULL;
     mfn = pte_to_mfn(tab[offset]);
     tab = mfn_to_virt(mfn);
-    offset = l1_table_offset(addr);
+    offset = l1_table_offset(va);
     return &tab[offset];
 }

-pgentry_t *need_pgt(unsigned long addr)
+
+/*
+ * Return a valid PTE for a given virtual address. If the PTE does not exist,
+ * allocate page-table pages.
+ */
+pgentry_t *need_pgt(unsigned long va)
 {
-    unsigned long mfn;
+    unsigned long pt_mfn;
     pgentry_t *tab;
     unsigned long pt_pfn;
     unsigned offset;

     tab = (pgentry_t *)start_info.pt_base;
-    mfn = virt_to_mfn(start_info.pt_base);
+    pt_mfn = virt_to_mfn(start_info.pt_base);

 #if defined(__x86_64__)
-    offset = l4_table_offset(addr);
-    if (!(tab[offset] & _PAGE_PRESENT)) {
+    offset = l4_table_offset(va);
+    if ( !(tab[offset] & _PAGE_PRESENT) )
+    {
         pt_pfn = virt_to_pfn(alloc_page());
-        new_pt_frame(&pt_pfn, mfn, offset, L3_FRAME);
+        new_pt_frame(&pt_pfn, pt_mfn, offset, L3_FRAME);
     }
     ASSERT(tab[offset] & _PAGE_PRESENT);
-    mfn = pte_to_mfn(tab[offset]);
-    tab = mfn_to_virt(mfn);
+    pt_mfn = pte_to_mfn(tab[offset]);
+    tab = mfn_to_virt(pt_mfn);
 #endif
-    offset = l3_table_offset(addr);
-    if (!(tab[offset] & _PAGE_PRESENT)) {
+    offset = l3_table_offset(va);
+    if ( !(tab[offset] & _PAGE_PRESENT) )
+    {
         pt_pfn = virt_to_pfn(alloc_page());
-        new_pt_frame(&pt_pfn, mfn, offset, L2_FRAME);
+        new_pt_frame(&pt_pfn, pt_mfn, offset, L2_FRAME);
     }
     ASSERT(tab[offset] & _PAGE_PRESENT);
-    mfn = pte_to_mfn(tab[offset]);
-    tab = mfn_to_virt(mfn);
-    offset = l2_table_offset(addr);
-    if (!(tab[offset] & _PAGE_PRESENT)) {
+    pt_mfn = pte_to_mfn(tab[offset]);
+    tab = mfn_to_virt(pt_mfn);
+    offset = l2_table_offset(va);
+    if ( !(tab[offset] & _PAGE_PRESENT) )
+    {
         pt_pfn = virt_to_pfn(alloc_page());
-       new_pt_frame(&pt_pfn, mfn, offset, L1_FRAME);
+        new_pt_frame(&pt_pfn, pt_mfn, offset, L1_FRAME);
     }
     ASSERT(tab[offset] & _PAGE_PRESENT);
-    mfn = pte_to_mfn(tab[offset]);
-    tab = mfn_to_virt(mfn);
+    pt_mfn = pte_to_mfn(tab[offset]);
+    tab = mfn_to_virt(pt_mfn);

-    offset = l1_table_offset(addr);
+    offset = l1_table_offset(va);
     return &tab[offset];
 }

+/*
+ * Reserve an area of virtual address space for mappings and the heap.
+ */
 static unsigned long demand_map_area_start;
 #ifdef __x86_64__
 #define DEMAND_MAP_PAGES ((128ULL << 30) / PAGE_SIZE)
@@ -466,7 +516,8 @@

     demand_map_area_start = (unsigned long) pfn_to_virt(cur_pfn);
     cur_pfn += DEMAND_MAP_PAGES;
- printk("Demand map pfns at %lx-%lx.\n", demand_map_area_start, pfn_to_virt(cur_pfn));
+    printk("Demand map pfns at %lx-%lx.\n",
+           demand_map_area_start, pfn_to_virt(cur_pfn));

 #ifdef HAVE_LIBC
     cur_pfn++;
@@ -477,93 +528,123 @@
 #endif
 }

+unsigned long allocate_ondemand(unsigned long n, unsigned long alignment)
+{
+    unsigned long x;
+    unsigned long y = 0;
+
+    /* Find a properly aligned run of n contiguous frames */
+    for ( x = 0;
+          x <= DEMAND_MAP_PAGES - n;
+          x = (x + y + 1 + alignment - 1) & ~(alignment - 1) )
+    {
+        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
+        pgentry_t *pgt = get_pgt(addr);
+        for ( y = 0; y < n; y++, addr += PAGE_SIZE )
+        {
+            if ( !(addr & L1_MASK) )
+                pgt = get_pgt(addr);
+            if ( pgt )
+            {
+                if ( *pgt & _PAGE_PRESENT )
+                    break;
+                pgt++;
+            }
+        }
+        if ( y == n )
+            break;
+    }
+    if ( y != n )
+    {
+        printk("Failed to find %ld frames!\n", n);
+        return 0;
+    }
+    return demand_map_area_start + x * PAGE_SIZE;
+}
+
+/*
+ * Map an array of MFNs contiguously into virtual address space starting at
+ * va. Maps mfns[i*stride] + i*incr for i in 0..n-1.
+ */
 #define MAP_BATCH ((STACK_SIZE / 2) / sizeof(mmu_update_t))
-void do_map_frames(unsigned long addr,
-        unsigned long *f, unsigned long n, unsigned long stride,
-       unsigned long increment, domid_t id, int may_fail, unsigned long prot)
+void do_map_frames(unsigned long va,
+                   unsigned long *mfns, unsigned long n,
+                   unsigned long stride, unsigned long incr,
+                   domid_t id, int may_fail,
+                   unsigned long prot)
 {
     pgentry_t *pgt = NULL;
     unsigned long done = 0;
     unsigned long i;
     int rc;

-    while (done < n) {
-       unsigned long todo;
+    if ( !mfns )
+    {
+        printk("do_map_frames: no mfns supplied\n");
+        return;
+    }
+ DEBUG("va=%p n=0x%lx, mfns[0]=0x%lx stride=0x%lx incr=0x%lx prot=0x%lx\n",
+          va, n, mfns[0], stride, incr, prot);
+
+    while ( done < n )
+    {
+        unsigned long todo;

-       if (may_fail)
-           todo = 1;
-       else
-           todo = n - done;
+        if ( may_fail )
+            todo = 1;
+        else
+            todo = n - done;

-       if (todo > MAP_BATCH)
-               todo = MAP_BATCH;
+        if ( todo > MAP_BATCH )
+            todo = MAP_BATCH;

-       {
-           mmu_update_t mmu_updates[todo];
+        {
+            mmu_update_t mmu_updates[todo];

-           for (i = 0; i < todo; i++, addr += PAGE_SIZE, pgt++) {
-                if (!pgt || !(addr & L1_MASK))
-                    pgt = need_pgt(addr);
-               mmu_updates[i].ptr = virt_to_mach(pgt);
-               mmu_updates[i].val = ((pgentry_t)(f[(done + i) * stride] + (done + i) * increment) << PAGE_SHIFT) | prot;
-           }
+            for ( i = 0; i < todo; i++, va += PAGE_SIZE, pgt++)
+            {
+                if ( !pgt || !(va & L1_MASK) )
+                    pgt = need_pgt(va);
+
+                mmu_updates[i].ptr = virt_to_mach(pgt) | MMU_NORMAL_PT_UPDATE;
+                mmu_updates[i].val = ((pgentry_t)(mfns[(done + i) * stride] +
+                                                  (done + i) * incr)
+                                      << PAGE_SHIFT) | prot;
+            }

-           rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
-           if (rc < 0) {
-               if (may_fail)
-                   f[done * stride] |= 0xF0000000;
-               else {
- printk("Map %ld (%lx, ...) at %p failed: %d.\n", todo, f[done * stride] + done * increment, addr, rc);
+            rc = HYPERVISOR_mmu_update(mmu_updates, todo, NULL, id);
+            if ( rc < 0 )
+            {
+                if (may_fail)
+                    mfns[done * stride] |= 0xF0000000;
+                else {
+                    printk("Map %ld (%lx, ...) at %p failed: %d.\n",
+                           todo, mfns[done * stride] + done * incr, va, rc);
                     do_exit();
-               }
-           }
-       }
-
-       done += todo;
+                }
+            }
+        }
+        done += todo;
     }
 }

-unsigned long allocate_ondemand(unsigned long n, unsigned long alignment)
+/*
+ * Map an array of MFNs contiguously into virtual address space. Virtual
+ * addresses are allocated from the on demand area.
+ */
+void *map_frames_ex(unsigned long *mfns, unsigned long n,
+                    unsigned long stride, unsigned long incr,
+                    unsigned long alignment,
+                    domid_t id, int may_fail, unsigned long prot)
 {
-    unsigned long x;
-    unsigned long y = 0;
+    unsigned long va = allocate_ondemand(n, alignment);

-    /* Find a properly aligned run of n contiguous frames */
-    for (x = 0; x <= DEMAND_MAP_PAGES - n; x = (x + y + 1 + alignment - 1) & ~(alignment - 1)) {
-        unsigned long addr = demand_map_area_start + x * PAGE_SIZE;
-        pgentry_t *pgt = get_pgt(addr);
-        for (y = 0; y < n; y++, addr += PAGE_SIZE) {
-            if (!(addr & L1_MASK))
-                pgt = get_pgt(addr);
-            if (pgt) {
-                if (*pgt & _PAGE_PRESENT)
-                    break;
-                pgt++;
-            }
-        }
-        if (y == n)
-            break;
-    }
-    if (y != n) {
-        printk("Failed to find %ld frames!\n", n);
-        return 0;
-    }
-    return demand_map_area_start + x * PAGE_SIZE;
-}
-
-void *map_frames_ex(unsigned long *f, unsigned long n, unsigned long stride,
-       unsigned long increment, unsigned long alignment, domid_t id,
-       int may_fail, unsigned long prot)
-{
-    unsigned long addr = allocate_ondemand(n, alignment);
-
-    if (!addr)
+    if ( !va )
         return NULL;

-    /* Found it at x.  Map it in. */
-    do_map_frames(addr, f, n, stride, increment, id, may_fail, prot);
+    do_map_frames(va, mfns, n, stride, incr, id, may_fail, prot);

-    return (void *)addr;
+    return (void *)va;
 }

 /*
@@ -632,15 +713,19 @@
 }


+/*
+ * Clear some of the bootstrap memory
+ */
 static void clear_bootstrap(void)
 {
     pte_t nullpte = { };
+    int rc;

     /* Use first page as the CoW zero page */
     memset(&_text, 0, PAGE_SIZE);
     mfn_zero = virt_to_mfn((unsigned long) &_text);
-    if (HYPERVISOR_update_va_mapping(0, nullpte, UVMF_INVLPG))
-       printk("Unable to unmap NULL page\n");
+    if ( (rc = HYPERVISOR_update_va_mapping(0, nullpte, UVMF_INVLPG)) )
+        printk("Unable to unmap NULL page. rc=%d\n", rc);
 }

 void arch_init_p2m(unsigned long max_pfn)
@@ -665,19 +750,19 @@
     unsigned long pfn;

     l3_list = (unsigned long *)alloc_page();
-    for(pfn=0; pfn<max_pfn; pfn++)
+    for ( pfn=0; pfn<max_pfn; pfn++ )
     {
-        if(!(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)))
+        if ( !(pfn % (L1_P2M_ENTRIES * L2_P2M_ENTRIES)) )
         {
             l2_list = (unsigned long*)alloc_page();
-            if((pfn >> L3_P2M_SHIFT) > 0)
+            if ( (pfn >> L3_P2M_SHIFT) > 0 )
             {
                 printk("Error: Too many pfns.\n");
                 do_exit();
             }
             l3_list[(pfn >> L2_P2M_SHIFT)] = virt_to_mfn(l2_list);
         }
-        if(!(pfn % (L1_P2M_ENTRIES)))
+        if ( !(pfn % (L1_P2M_ENTRIES)) )
         {
             l1_list = (unsigned long*)alloc_page();
             l2_list[(pfn >> L1_P2M_SHIFT) & L2_P2M_MASK] =
@@ -696,25 +781,25 @@

     unsigned long start_pfn, max_pfn, virt_pfns;

-    printk("  _text:        %p\n", &_text);
-    printk("  _etext:       %p\n", &_etext);
-    printk("  _erodata:     %p\n", &_erodata);
-    printk("  _edata:       %p\n", &_edata);
-    printk("  stack start:  %p\n", stack);
-    printk("  _end:         %p\n", &_end);
+    printk("      _text: %p(VA)\n", &_text);
+    printk("     _etext: %p(VA)\n", &_etext);
+    printk("   _erodata: %p(VA)\n", &_erodata);
+    printk("     _edata: %p(VA)\n", &_edata);
+    printk("stack start: %p(VA)\n", stack);
+    printk("       _end: %p(VA)\n", &_end);

     /* First page follows page table pages and 3 more pages (store page etc) */
     start_pfn = PFN_UP(to_phys(start_info.pt_base)) +
-                start_info.nr_pt_frames + 3;
+        start_info.nr_pt_frames + 3;
     max_pfn = start_info.nr_pages;

     /* We need room for demand mapping and heap, clip available memory */
     virt_pfns = DEMAND_MAP_PAGES + HEAP_PAGES;
-    if (max_pfn + virt_pfns + 1 < max_pfn)
+    if ( max_pfn + virt_pfns + 1 < max_pfn )
         max_pfn = -(virt_pfns + 1);

-    printk("  start_pfn:    %lx\n", start_pfn);
-    printk("  max_pfn:      %lx\n", max_pfn);
+    printk("  start_pfn: %lx\n", start_pfn);
+    printk("    max_pfn: %lx\n", max_pfn);

     build_pagetable(&start_pfn, &max_pfn);
     clear_bootstrap();
diff -r 21b2f5f70aaa extras/mini-os/arch/x86/setup.c
--- a/extras/mini-os/arch/x86/setup.c   Thu Feb 19 16:41:22 2009 +0000
+++ b/extras/mini-os/arch/x86/setup.c   Thu Feb 19 17:23:16 2009 +0000
@@ -63,10 +63,12 @@
 static
 shared_info_t *map_shared_info(unsigned long pa)
 {
-       if ( HYPERVISOR_update_va_mapping(
-               (unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG) )
+    int rc;
+
+       if ( (rc = HYPERVISOR_update_va_mapping(
+              (unsigned long)shared_info, __pte(pa | 7), UVMF_INVLPG)) )
        {
-               printk("Failed to map shared_info!!\n");
+               printk("Failed to map shared_info!! rc=%d\n", rc);
                do_exit();
        }
        return (shared_info_t *)shared_info;
diff -r 21b2f5f70aaa extras/mini-os/kernel.c
--- a/extras/mini-os/kernel.c   Thu Feb 19 16:41:22 2009 +0000
+++ b/extras/mini-os/kernel.c   Thu Feb 19 17:23:16 2009 +0000
@@ -490,14 +490,16 @@

     /* print out some useful information  */
     printk("Xen Minimal OS!\n");
-    printk("start_info:   %p\n",    si);
-    printk("  nr_pages:   %lu",     si->nr_pages);
-    printk("  shared_inf: %08lx\n", si->shared_info);
-    printk("  pt_base:    %p",      (void *)si->pt_base);
-    printk("  mod_start:  0x%lx\n", si->mod_start);
-    printk("  mod_len:    %lu\n",   si->mod_len);
-    printk("  flags:      0x%x\n",  (unsigned int)si->flags);
-    printk("  cmd_line:   %s\n",
+    printk("  start_info: %p(VA)\n", si);
+    printk("    nr_pages: 0x%lx\n", si->nr_pages);
+    printk("  shared_inf: 0x%08lx(MA)\n", si->shared_info);
+    printk("     pt_base: %p(VA)\n", (void *)si->pt_base);
+    printk("nr_pt_frames: 0x%lx\n", si->nr_pt_frames);
+    printk("    mfn_list: %p(VA)\n", (void *)si->mfn_list);
+    printk("   mod_start: 0x%lx(VA)\n", si->mod_start);
+    printk("     mod_len: %lu\n", si->mod_len);
+    printk("       flags: 0x%x\n", (unsigned int)si->flags);
+    printk("    cmd_line: %s\n",
            si->cmd_line ? (const char *)si->cmd_line : "NULL");

     /* Set up events. */
