
[Xen-devel] [Patch][RFC] fix xc_ptrace_core for gdbserver-xen on x86_64.



Hi x86_64 people,

I made a patch to fix xc_ptrace_core for gdbserver-xen on x86_64.
It removes the m2p_array, resolving machine frames by scanning p2m_array
in map_mtop_offset() instead, and adds an x86_64 version of
map_domain_va_core() that walks all four page-table levels.
I'm not very familiar with x86_64, so please review it.
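
For reference, the map_mtop_offset() change boils down to something like
the sketch below (a standalone illustration only; the globals mirror the
ones kept by xc_ptrace_core.c and the _sketch name is made up):

#define PAGE_SHIFT 12   /* assuming 4KB pages, as elsewhere in libxc */

/* Stand-ins for the globals kept by xc_ptrace_core.c. */
static unsigned long  nr_pages;      /* number of pages dumped in the core  */
static unsigned long *p2m_array;     /* machine frame of the i-th dumped page */
static unsigned long  pages_offset;  /* file offset where page data begins  */

/*
 * Translate a machine address into an offset within the core file by
 * scanning the p2m list, instead of keeping a separate m2p_array.
 */
static unsigned long
map_mtop_offset_sketch(unsigned long ma)
{
    unsigned long pfn = ma >> PAGE_SHIFT;
    unsigned long i;

    for (i = 0; i < nr_pages; i++)
        if (p2m_array[i] == pfn)
            break;

    /* If the frame is not in the dump, this points past the last page. */
    return pages_offset + (i << PAGE_SHIFT);
}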

Signed-off-by: Akio Takebe <takebe_akio@xxxxxxxxxxxxxx>

diff -r b688d4a68a3e tools/libxc/xc_ptrace_core.c
--- a/tools/libxc/xc_ptrace_core.c      Tue Aug 22 14:59:16 2006 +0100
+++ b/tools/libxc/xc_ptrace_core.c      Fri Aug 25 12:22:19 2006 +0900
@@ -9,9 +9,8 @@
 
 /* XXX application state */
 
-static long   nr_pages = 0;
+static unsigned long   nr_pages = 0;
 static unsigned long  *p2m_array = NULL;
-static unsigned long  *m2p_array = NULL;
 static unsigned long   pages_offset;
 static unsigned long   cr3[MAX_VIRT_CPUS];
 
@@ -20,11 +19,18 @@ static unsigned long
 static unsigned long
 map_mtop_offset(unsigned long ma)
 {
-    return pages_offset + (m2p_array[ma >> PAGE_SHIFT] << PAGE_SHIFT);
-    return 0;
-}
-
-
+
+    unsigned long page_num;
+    for (page_num = 0; page_num < nr_pages; page_num++) {
+        if ((ma >> PAGE_SHIFT) == p2m_array[page_num])
+            break;
+    }
+
+    return pages_offset + (page_num << PAGE_SHIFT);
+}
+
+
+#if defined(__i386__)
 void *
 map_domain_va_core(unsigned long domfd, int cpu, void * guest_va,
                         vcpu_guest_context_t *ctxt)
@@ -93,6 +99,122 @@ map_domain_va_core(unsigned long domfd, 
     }
     return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
 }
+
+#elif defined(__x86_64__)
+void *
+map_domain_va_core(unsigned long domfd, int cpu, void * guest_va,
+                        vcpu_guest_context_t *ctxt)
+{
+    unsigned long pgd, pud, pmd, page;
+    unsigned long va = (unsigned long)guest_va;
+    void *v;
+
+    static unsigned long  cr3_phys[MAX_VIRT_CPUS];
+    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
+    static unsigned long  pgd_phys[MAX_VIRT_CPUS];
+    static unsigned long *pgd_virt[MAX_VIRT_CPUS];
+    static unsigned long  pud_phys[MAX_VIRT_CPUS];
+    static unsigned long *pud_virt[MAX_VIRT_CPUS];
+    static unsigned long  pmd_phys[MAX_VIRT_CPUS];
+    static unsigned long *pmd_virt[MAX_VIRT_CPUS];
+    static unsigned long  page_phys[MAX_VIRT_CPUS];
+    static unsigned long *page_virt[MAX_VIRT_CPUS];
+
+    if (cr3[cpu] != cr3_phys[cpu])
+    {
+        cr3_phys[cpu] = cr3[cpu];
+        if (cr3_virt[cpu])
+            munmap(cr3_virt[cpu], PAGE_SIZE);
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(xen_cr3_to_pfn(cr3_phys[cpu])));
+        if (v == MAP_FAILED)
+        {
+            perror("mmap failed");
+            return NULL;
+        }
+        cr3_virt[cpu] = v;
+    }
+
+    /* 4 level */
+    if ((pgd = cr3_virt[cpu][l4_table_offset(va)]) == 0) /* logical address */
+        return NULL;
+    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
+        pgd = p2m_array[pgd >> PAGE_SHIFT] << PAGE_SHIFT;
+    if (pgd != pgd_phys[cpu])
+    {
+        pgd_phys[cpu] = pgd;
+        if (pgd_virt[cpu])
+            munmap(pgd_virt[cpu], PAGE_SIZE);
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(pgd_phys[cpu]));
+        if (v == MAP_FAILED)
+            return NULL;
+        pgd_virt[cpu] = v;
+    }
+
+    /* 3 level */
+    if ((pud = pgd_virt[cpu][l3_table_offset(va)]) == 0) /* logical address */
+        return NULL;
+    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
+        pud = p2m_array[pud >> PAGE_SHIFT] << PAGE_SHIFT;
+    if (pud != pud_phys[cpu])
+    {
+        pud_phys[cpu] = pud;
+        if (pud_virt[cpu])
+            munmap(pud_virt[cpu], PAGE_SIZE);
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(pud_phys[cpu]));
+        if (v == MAP_FAILED)
+            return NULL;
+        pud_virt[cpu] = v;
+    }
+
+
+    /* 2 level */
+    if ((pmd = pud_virt[cpu][l2_table_offset(va)]) == 0) /* logical address */
+        return NULL;
+    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
+        pmd = p2m_array[pmd >> PAGE_SHIFT] << PAGE_SHIFT;
+    if (pmd != pmd_phys[cpu])
+    {
+        pmd_phys[cpu] = pmd;
+        if (pmd_virt[cpu])
+            munmap(pmd_virt[cpu], PAGE_SIZE);
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(pmd_phys[cpu]));
+        if (v == MAP_FAILED)
+            return NULL;
+        pmd_virt[cpu] = v;
+    }
+
+
+    if ((page = pmd_virt[cpu][l1_table_offset(va)]) == 0) /* logical address */
+        return NULL;
+    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
+        page = p2m_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
+    if (page != page_phys[cpu])
+    {
+        page_phys[cpu] = page;
+        if (page_virt[cpu])
+            munmap(page_virt[cpu], PAGE_SIZE);
+        v = mmap(
+            NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
+            map_mtop_offset(page_phys[cpu]));
+        if (v == MAP_FAILED)
+        {
+            IPRINTF("cr3 %lx pmd %lx page %lx pti %lx\n", cr3[cpu], pmd, page, l1_table_offset(va));
+            page_phys[cpu] = 0;
+            return NULL;
+        }
+        page_virt[cpu] = v;
+    }
+    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
+}
+#endif
 
 int
 xc_waitdomain_core(
@@ -140,15 +262,6 @@ xc_waitdomain_core(
             sizeof(unsigned long)*nr_pages)
             return -1;
 
-        if ((m2p_array = malloc((1<<20) * sizeof(unsigned long))) == NULL)
-        {
-            IPRINTF("Could not allocate m2p array\n");
-            return -1;
-        }
-        bzero(m2p_array, sizeof(unsigned long)* 1 << 20);
-
-        for (i = 0; i < nr_pages; i++)
-            m2p_array[p2m_array[i]] = i;
     }
     return 0;
 }
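
To make the new x86_64 map_domain_va_core() easier to follow, this is
roughly the translation it performs, with the per-vcpu mmap caching and the
HVM p2m translation stripped out. map_page() is a made-up helper, and the
sketch relies on the same headers and macros the file already uses
(mmap, PAGE_SIZE, BSD_PAGE_MASK, xen_cr3_to_pfn, l?_table_offset):

/* Made-up helper: mmap one page of the core file at the offset where the
 * page holding machine address 'ma' was dumped. */
static void *
map_page(int domfd, unsigned long ma)
{
    void *v = mmap(NULL, PAGE_SIZE, PROT_READ, MAP_PRIVATE, domfd,
                   map_mtop_offset(ma));
    return (v == MAP_FAILED) ? NULL : v;
}

/* Walk the four page-table levels from cr3 down to the data page.
 * (Simplified: intermediate mappings are leaked here; the real code keeps
 * them cached per vcpu.) */
static void *
map_domain_va_sketch(int domfd, unsigned long cr3_val, unsigned long va)
{
    unsigned long *tbl, entry;
    void *data;

    /* Level 4: cr3 holds the machine frame of the top-level table. */
    tbl = map_page(domfd, xen_cr3_to_pfn(cr3_val) << PAGE_SHIFT);
    if (tbl == NULL || (entry = tbl[l4_table_offset(va)]) == 0)
        return NULL;

    /* Level 3 */
    tbl = map_page(domfd, entry);
    if (tbl == NULL || (entry = tbl[l3_table_offset(va)]) == 0)
        return NULL;

    /* Level 2 */
    tbl = map_page(domfd, entry);
    if (tbl == NULL || (entry = tbl[l2_table_offset(va)]) == 0)
        return NULL;

    /* Level 1: this entry names the data page itself. */
    tbl = map_page(domfd, entry);
    if (tbl == NULL || (entry = tbl[l1_table_offset(va)]) == 0)
        return NULL;

    data = map_page(domfd, entry);
    if (data == NULL)
        return NULL;

    return (void *)((unsigned long)data | (va & BSD_PAGE_MASK));
}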


Best Regards,

Akio Takebe

Attachment: fix_xc_ptrace_core.patch
Description: Binary data
