
[Xen-changelog] [patch] libxc x86-64



ChangeSet 1.1669, 2005/06/03 22:50:54+01:00, iap10@xxxxxxxxxxxxxxxxxxxxx

        [patch] libxc x86-64
        I've redone the patch to add domU launching support to libxc for x86-64.
        
        Signed-off-by: Jerone Young <jyoung5@xxxxxxxxxx>
        Signed-off-by: ian@xxxxxxxxxxxxx



 xc_linux_build.c |  165 +++++++++++++++++++++++++++++++++++++++++++++++--------
 xc_private.h     |   32 ++++++++++
 2 files changed, 173 insertions(+), 24 deletions(-)


diff -Nru a/tools/libxc/xc_linux_build.c b/tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      2005-06-03 18:03:15 -04:00
+++ b/tools/libxc/xc_linux_build.c      2005-06-03 18:03:15 -04:00
@@ -3,13 +3,32 @@
  */
 
 #include "xc_private.h"
+
+#if defined(__i386__)
 #define ELFSIZE 32
+#endif
+
+#if defined(__x86_64__)
+#define ELFSIZE 64
+#endif
+
+
 #include "xc_elf.h"
 #include <stdlib.h>
 #include <zlib.h>
 
+#if defined(__i386__)
 #define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED)
 #define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#endif
+
+#if defined(__x86_64__)
+#define L1_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_USER)
+#define L2_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L3_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#define L4_PROT (_PAGE_PRESENT|_PAGE_RW|_PAGE_ACCESSED|_PAGE_DIRTY|_PAGE_USER)
+#endif
+
 
 #define round_pgup(_p)    (((_p)+(PAGE_SIZE-1))&PAGE_MASK)
 #define round_pgdown(_p)  ((_p)&PAGE_MASK)
@@ -54,9 +73,17 @@
 {
     l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
     l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
+#if defined(__x86_64__)
+    l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
+    l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
+#endif
     unsigned long *page_array = NULL;
-    unsigned long l2tab;
-    unsigned long l1tab;
+    unsigned long l2tab = 0;
+    unsigned long l1tab = 0;
+#if defined(__x86_64__)
+    unsigned long l3tab = 0;
+    unsigned long l4tab = 0;
+#endif
     unsigned long count, i;
     start_info_t *start_info;
     shared_info_t *shared_info;
@@ -111,30 +138,45 @@
         vstartinfo_end   = vstartinfo_start + PAGE_SIZE;
         vstack_start     = vstartinfo_end;
         vstack_end       = vstack_start + PAGE_SIZE;
-        v_end            = (vstack_end + (1<<22)-1) & ~((1<<22)-1);
-        if ( (v_end - vstack_end) < (512 << 10) )
-            v_end += 1 << 22; /* Add extra 4MB to get >= 512kB padding. */
+        v_end            = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1);
+        if ( (v_end - vstack_end) < (512UL << 10) )
+            v_end += 1UL << 22; /* Add extra 4MB to get >= 512kB padding. */
+#if defined(__i386__)
         if ( (((v_end - dsi.v_start + ((1<<L2_PAGETABLE_SHIFT)-1)) >> 
                L2_PAGETABLE_SHIFT) + 1) <= nr_pt_pages )
             break;
+#endif
+#if defined(__x86_64__)
+#define NR(_l,_h,_s) \
+    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
+      ((_l) & ~((1UL<<(_s))-1))) >> (_s))
+        if ( (1 + /* # L4 */
+              NR(dsi.v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
+              NR(dsi.v_start, v_end, L3_PAGETABLE_SHIFT) + /* # L2 */
+              NR(dsi.v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
+             <= nr_pt_pages )
+            break;
+#endif
     }
 
+#define _p(a) ((void *) (a))
+
     printf("VIRTUAL MEMORY ARRANGEMENT:\n"
-           " Loaded kernel: %08lx->%08lx\n"
-           " Init. ramdisk: %08lx->%08lx\n"
-           " Phys-Mach map: %08lx->%08lx\n"
-           " Page tables:   %08lx->%08lx\n"
-           " Start info:    %08lx->%08lx\n"
-           " Boot stack:    %08lx->%08lx\n"
-           " TOTAL:         %08lx->%08lx\n",
-           dsi.v_kernstart, dsi.v_kernend, 
-           vinitrd_start, vinitrd_end,
-           vphysmap_start, vphysmap_end,
-           vpt_start, vpt_end,
-           vstartinfo_start, vstartinfo_end,
-           vstack_start, vstack_end,
-           dsi.v_start, v_end);
-    printf(" ENTRY ADDRESS: %08lx\n", dsi.v_kernentry);
+           " Loaded kernel: %p->%p\n"
+           " Init. ramdisk: %p->%p\n"
+           " Phys-Mach map: %p->%p\n"
+           " Page tables:   %p->%p\n"
+           " Start info:    %p->%p\n"
+           " Boot stack:    %p->%p\n"
+           " TOTAL:         %p->%p\n",
+           _p(dsi.v_kernstart), _p(dsi.v_kernend), 
+           _p(vinitrd_start), _p(vinitrd_end),
+           _p(vphysmap_start), _p(vphysmap_end),
+           _p(vpt_start), _p(vpt_end),
+           _p(vstartinfo_start), _p(vstartinfo_end),
+           _p(vstack_start), _p(vstack_end),
+           _p(dsi.v_start), _p(v_end));
+    printf(" ENTRY ADDRESS: %p\n", _p(dsi.v_kernentry));
 
     if ( (v_end - dsi.v_start) > (nr_pages * PAGE_SIZE) )
     {
@@ -178,6 +220,7 @@
     if ( (mmu = init_mmu_updates(xc_handle, dom)) == NULL )
         goto error_out;
 
+#if defined(__i386__)
     /* First allocate page for page dir. */
     ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
     l2tab = page_array[ppt_alloc++] << PAGE_SHIFT;
@@ -217,6 +260,74 @@
     }
     munmap(vl1tab, PAGE_SIZE);
     munmap(vl2tab, PAGE_SIZE);
+#endif
+#if defined(__x86_64__)
+
+/* Allocate the next page-table frame and map it (zeroed) into our
+ * address space, unmapping any previous mapping held in vltab. */
+#define alloc_pt(ltab, vltab) \
+        ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
+        if ( vltab != NULL ) \
+            munmap(vltab, PAGE_SIZE); \
+        if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE, \
+                                           PROT_READ|PROT_WRITE, \
+                                           ltab >> PAGE_SHIFT)) == NULL ) \
+            goto error_out; \
+        memset(vltab, 0, PAGE_SIZE);
+
+    /* First allocate the page for the top-level (L4) page table. */
+    ppt_alloc = (vpt_start - dsi.v_start) >> PAGE_SHIFT;
+    l4tab = page_array[ppt_alloc++] << PAGE_SHIFT;
+    ctxt->pt_base = l4tab;
+    
+    /* Initialize the page table. */
+    if ( (vl4tab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
+                                        PROT_READ|PROT_WRITE,
+                                        l4tab >> PAGE_SHIFT)) == NULL )
+        goto error_out;
+    memset(vl4tab, 0, PAGE_SIZE);
+    vl4e = &vl4tab[l4_table_offset(dsi.v_start)];
+    
+    for ( count = 0; count < ((v_end-dsi.v_start)>>PAGE_SHIFT); count++ )
+    {
+        if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
+        {
+            alloc_pt(l1tab, vl1tab);
+
+            if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
+            {
+                alloc_pt(l2tab, vl2tab);
+                if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
+                {
+                    alloc_pt(l3tab, vl3tab);
+                    vl3e = &vl3tab[l3_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+                    *vl4e = l3tab | L4_PROT;
+                    vl4e++;
+                }
+                vl2e = &vl2tab[l2_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+                *vl3e = l2tab | L3_PROT;
+                vl3e++;
+            }
+            vl1e = &vl1tab[l1_table_offset(dsi.v_start + (count<<PAGE_SHIFT))];
+            *vl2e = l1tab | L2_PROT;
+            vl2e++;
+        }
+
+        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+        if ( (count >= ((vpt_start-dsi.v_start)>>PAGE_SHIFT)) &&
+             (count <  ((vpt_end  -dsi.v_start)>>PAGE_SHIFT)) )
+        {
+            *vl1e &= ~_PAGE_RW;
+        }
+        vl1e++;
+    }
+
+    munmap(vl1tab, PAGE_SIZE);
+    munmap(vl2tab, PAGE_SIZE);
+    munmap(vl3tab, PAGE_SIZE);
+    munmap(vl4tab, PAGE_SIZE);
+#endif
 
     /* Write the phys->machine and machine->phys table entries. */
     physmap_pfn = (vphysmap_start - dsi.v_start) >> PAGE_SHIFT;
@@ -243,13 +354,23 @@
     }
     munmap(physmap, PAGE_SIZE);
     
+#if defined(__i386__)
     /*
      * Pin down l2tab addr as page dir page - causes hypervisor to provide
      * correct protection for the page
      */ 
     if ( pin_table(xc_handle, MMUEXT_PIN_L2_TABLE, l2tab>>PAGE_SHIFT, dom) )
         goto error_out;
+#endif
 
+#if defined(__x86_64__)
+    /*
+     * Pin down l4tab addr as page dir page - causes hypervisor to provide
+     * correct protection for the page
+     */
+    if ( pin_table(xc_handle, MMUEXT_PIN_L4_TABLE, l4tab>>PAGE_SHIFT, dom) )
+        goto error_out;
+#endif
     start_info = xc_map_foreign_range(
         xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
         page_array[(vstartinfo_start-dsi.v_start)>>PAGE_SHIFT]);
@@ -409,7 +530,7 @@
     ctxt->user_regs.es = FLAT_KERNEL_DS;
     ctxt->user_regs.fs = FLAT_KERNEL_DS;
     ctxt->user_regs.gs = FLAT_KERNEL_DS;
-    ctxt->user_regs.ss = FLAT_KERNEL_DS;
+    ctxt->user_regs.ss = FLAT_KERNEL_SS;
     ctxt->user_regs.cs = FLAT_KERNEL_CS;
     ctxt->user_regs.eip = vkern_entry;
     ctxt->user_regs.esp = vstartinfo_start + 2*PAGE_SIZE;
@@ -433,7 +554,7 @@
     ctxt->gdt_ents = 0;
 
     /* Ring 1 stack is the initial stack. */
-    ctxt->kernel_ss = FLAT_KERNEL_DS;
+    ctxt->kernel_ss = FLAT_KERNEL_SS;
     ctxt->kernel_sp = vstartinfo_start + 2*PAGE_SIZE;
 
     /* No debugging. */
diff -Nru a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  2005-06-03 18:03:15 -04:00
+++ b/tools/libxc/xc_private.h  2005-06-03 18:03:15 -04:00
@@ -29,12 +29,25 @@
 #define _PAGE_PSE       0x080
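
For readers following the page-table sizing arithmetic in the x86-64 hunk above, here is a minimal, self-contained sketch (not part of the patch) that exercises the NR() macro. The shift values and the example address range are illustrative assumptions for standard x86-64 4KB paging, where an L1 table covers 2MB (shift 21), an L2 table 1GB (shift 30), and an L3 table 512GB (shift 39); exactly one L4 table is always needed.

/* Hedged sketch: count the page-table pages NR() says a virtual range
 * needs.  Shifts and addresses below are assumptions for illustration,
 * not values taken from libxc headers. */
#include <stdio.h>

#define NR(_l,_h,_s) \
    (((((_h) + ((1UL<<(_s))-1)) & ~((1UL<<(_s))-1)) - \
    ((_l) & ~((1UL<<(_s))-1))) >> (_s))

int main(void)
{
    unsigned long v_start = 0x0000000080000000UL;   /* hypothetical base */
    unsigned long v_end   = v_start + (16UL << 20); /* 16MB mapped image */

    unsigned long nr_l1 = NR(v_start, v_end, 21); /* one L1 per 2MB   */
    unsigned long nr_l2 = NR(v_start, v_end, 30); /* one L2 per 1GB   */
    unsigned long nr_l3 = NR(v_start, v_end, 39); /* one L3 per 512GB */

    /* Mirrors the "1 + ..." test in the patch: prints 1+1+1+8 = 11. */
    printf("pt pages: 1 L4 + %lu L3 + %lu L2 + %lu L1 = %lu\n",
           nr_l3, nr_l2, nr_l1, 1 + nr_l3 + nr_l2 + nr_l1);
    return 0;
}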

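A second hedged sketch, this time of the virtual-address decomposition that the l1/l2/l3/l4_table_offset() helpers in the population loop rely on. The stand-in helper below assumes the standard x86-64 layout of 9 index bits per level at shifts 39/30/21/12; it is written for illustration and is not the libxc definition.

/* Stand-in for the l?_table_offset() helpers (assumed standard x86-64
 * 4KB paging: 512 entries per table, 9 index bits per level). */
#include <stdio.h>

#define LEVEL_MASK 0x1ffUL   /* 512 entries per table */

static unsigned long table_offset(unsigned long va, unsigned int shift)
{
    return (va >> shift) & LEVEL_MASK;
}

int main(void)
{
    unsigned long va = 0x0000000080201000UL; /* arbitrary example VA */

    printf("L4 idx %3lu  (va >> 39)\n", table_offset(va, 39));
    printf("L3 idx %3lu  (va >> 30)\n", table_offset(va, 30));
    printf("L2 idx %3lu  (va >> 21)\n", table_offset(va, 21));
    printf("L1 idx %3lu  (va >> 12)\n", table_offset(va, 12));
    return 0;
}

Each new page-table page in the loop is hooked up exactly when the relevant entry pointer wraps to a page boundary, which is what the !((unsigned long)vl1e & (PAGE_SIZE-1)) tests detect.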
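Finally, a hedged sketch of the v_end padding arithmetic near the top of the patch: vstack_end is rounded up to the next 4MB boundary, and an extra 4MB is added whenever that leaves less than 512kB of slack. The starting value below is made up for the demonstration.

/* Hedged sketch of the v_end sizing from the patch; vstack_end is a
 * made-up example value. */
#include <stdio.h>

int main(void)
{
    unsigned long vstack_end = 0x00000000c0ff0000UL; /* hypothetical */
    unsigned long v_end;

    v_end = (vstack_end + (1UL<<22)-1) & ~((1UL<<22)-1); /* round up to 4MB */
    if ( (v_end - vstack_end) < (512UL << 10) )
        v_end += 1UL << 22; /* add 4MB to guarantee >= 512kB padding */

    printf("vstack_end %#lx -> v_end %#lx (%lukB of padding)\n",
           vstack_end, v_end, (v_end - vstack_end) >> 10);
    return 0;
}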