
[Xen-changelog] Merged.



# HG changeset patch
# User emellor@xxxxxxxxxxxxxxxxxxxxxx
# Node ID f37f1c9ec2ec97c10c81a26014121a47586d9732
# Parent  b67f9f21fd9c2a49bb11681638c7b7c70006002b
# Parent  bf09a8db5bb477b7c823021d8ac65dfc45b07a12
Merged.

diff -r b67f9f21fd9c -r f37f1c9ec2ec linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c   Sun Nov 27 01:06:20 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/pgtable.c   Sun Nov 27 01:06:44 2005
@@ -278,26 +278,22 @@
        unsigned long flags;
 
        if (PTRS_PER_PMD > 1) {
-#ifdef CONFIG_XEN
                /* Ensure pgd resides below 4GB. */
                int rc = xen_create_contiguous_region(
                        (unsigned long)pgd, 0, 32);
                BUG_ON(rc);
-#endif
                if (HAVE_SHARED_KERNEL_PMD)
                        memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-                              swapper_pg_dir, sizeof(pgd_t));
+                              swapper_pg_dir + USER_PTRS_PER_PGD,
+                              (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        } else {
-               if (!HAVE_SHARED_KERNEL_PMD)
-                       spin_lock_irqsave(&pgd_lock, flags);
+               spin_lock_irqsave(&pgd_lock, flags);
                memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-               if (!HAVE_SHARED_KERNEL_PMD) {
-                       pgd_list_add(pgd);
-                       spin_unlock_irqrestore(&pgd_lock, flags);
-               }
+               pgd_list_add(pgd);
+               spin_unlock_irqrestore(&pgd_lock, flags);
        }
 }
 
@@ -305,9 +301,6 @@
 void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
 {
        unsigned long flags; /* can be called from interrupt context */
-
-       if (HAVE_SHARED_KERNEL_PMD)
-               return;
 
        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
@@ -335,18 +328,24 @@
 
        if (!HAVE_SHARED_KERNEL_PMD) {
                unsigned long flags;
-               pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
-               pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
-               pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
-               pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-               ++i;
-               if (!pmd)
-                       goto out_oom;
+
+               for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+                       pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
+                       if (!pmd)
+                               goto out_oom;
+                       set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
+               }
 
                spin_lock_irqsave(&pgd_lock, flags);
-               memcpy(pmd, copy_pmd, PAGE_SIZE);
-               make_lowmem_page_readonly(pmd);
-               set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
+               for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+                       unsigned long v = (unsigned long)i << PGDIR_SHIFT;
+                       pgd_t *kpgd = pgd_offset_k(v);
+                       pud_t *kpud = pud_offset(kpgd, v);
+                       pmd_t *kpmd = pmd_offset(kpud, v);
+                       pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
+                       memcpy(pmd, kpmd, PAGE_SIZE);
+                       make_lowmem_page_readonly(pmd);
+               }
                pgd_list_add(pgd);
                spin_unlock_irqrestore(&pgd_lock, flags);
        }
@@ -374,13 +373,15 @@
                }
                if (!HAVE_SHARED_KERNEL_PMD) {
                        unsigned long flags;
-                       pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
                        spin_lock_irqsave(&pgd_lock, flags);
                        pgd_list_del(pgd);
                        spin_unlock_irqrestore(&pgd_lock, flags);
-                       make_lowmem_page_writable(pmd);
-                       memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-                       kmem_cache_free(pmd_cache, pmd);
+                       for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
+                               pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
+                               make_lowmem_page_writable(pmd);
+                               memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
+                               kmem_cache_free(pmd_cache, pmd);
+                       }
                }
        }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
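A note on the pgtable.c change above: when the kernel PMD cannot be shared
(HAVE_SHARED_KERNEL_PMD == 0, as under Xen), pgd_alloc previously gave each
new pgd a single private kernel pmd copied from the mapping of PAGE_OFFSET.
It now allocates one pmd per kernel pgd slot and copies each slot's live
kernel pmd under pgd_lock before marking it read-only, with pgd_free
undoing the same range. Below is a minimal standalone sketch of that
pattern, assuming i386 PAE constants and the helpers named in the diff; it
writes pgd[i] each iteration, which with PAE's single kernel slot is
equivalent to the diff's pgd[USER_PTRS_PER_PGD].

    static int copy_kernel_pmds(pgd_t *pgd)
    {
            unsigned long flags;
            int i;

            /* One private pmd page per kernel pgd slot. */
            for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
                    pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                    if (!pmd)
                            return -ENOMEM;
                    /* Low bit of the entry serves as the present flag. */
                    set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
            }

            /* Copy the live kernel mappings, then pin them read-only. */
            spin_lock_irqsave(&pgd_lock, flags);
            for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
                    unsigned long v = (unsigned long)i << PGDIR_SHIFT;
                    pmd_t *kpmd = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
                    pmd_t *pmd = (pmd_t *)__va(pgd_val(pgd[i]) - 1);

                    memcpy(pmd, kpmd, PAGE_SIZE);
                    make_lowmem_page_readonly(pmd); /* Xen requires RO pagetables */
            }
            pgd_list_add(pgd);
            spin_unlock_irqrestore(&pgd_lock, flags);
            return 0;
    }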
diff -r b67f9f21fd9c -r f37f1c9ec2ec patches/linux-2.6.12/pmd-shared.patch
--- a/patches/linux-2.6.12/pmd-shared.patch     Sun Nov 27 01:06:20 2005
+++ b/patches/linux-2.6.12/pmd-shared.patch     Sun Nov 27 01:06:44 2005
@@ -11,14 +11,20 @@
  
        spin_lock_irqsave(&pgd_lock, flags);
 diff -urNpP linux-2.6.12/arch/i386/mm/pgtable.c linux-2.6.12.new/arch/i386/mm/pgtable.c
---- linux-2.6.12/arch/i386/mm/pgtable.c        2005-11-24 21:51:49.000000000 +0000
-+++ linux-2.6.12.new/arch/i386/mm/pgtable.c    2005-11-24 22:06:04.000000000 +0000
-@@ -199,19 +199,22 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
+--- linux-2.6.12/arch/i386/mm/pgtable.c        2005-11-26 09:55:10.000000000 +0000
++++ linux-2.6.12.new/arch/i386/mm/pgtable.c    2005-11-26 10:20:36.000000000 +0000
+@@ -199,19 +199,20 @@ void pgd_ctor(void *pgd, kmem_cache_t *c
  {
        unsigned long flags;
  
 -      if (PTRS_PER_PMD == 1)
--              spin_lock_irqsave(&pgd_lock, flags);
++      if (PTRS_PER_PMD > 1) {
++              if (HAVE_SHARED_KERNEL_PMD)
++                      memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
++                             swapper_pg_dir + USER_PTRS_PER_PGD,
++                             (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
++      } else {
+               spin_lock_irqsave(&pgd_lock, flags);
 -
 -      memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
 -                      swapper_pg_dir + USER_PTRS_PER_PGD,
@@ -30,53 +36,40 @@
 -      pgd_list_add(pgd);
 -      spin_unlock_irqrestore(&pgd_lock, flags);
 -      memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+      if (PTRS_PER_PMD > 1) {
-+              if (HAVE_SHARED_KERNEL_PMD)
-+                      memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
-+                             swapper_pg_dir, sizeof(pgd_t));
-+      } else {
-+              if (!HAVE_SHARED_KERNEL_PMD)
-+                      spin_lock_irqsave(&pgd_lock, flags);
 +              memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
 +                     swapper_pg_dir + USER_PTRS_PER_PGD,
 +                     (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
 +              memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
-+              if (!HAVE_SHARED_KERNEL_PMD) {
-+                      pgd_list_add(pgd);
-+                      spin_unlock_irqrestore(&pgd_lock, flags);
-+              }
++              pgd_list_add(pgd);
++              spin_unlock_irqrestore(&pgd_lock, flags);
 +      }
  }
  
  /* never called when PTRS_PER_PMD > 1 */
-@@ -219,6 +222,9 @@ void pgd_dtor(void *pgd, kmem_cache_t *c
- {
-       unsigned long flags; /* can be called from interrupt context */
- 
-+      if (HAVE_SHARED_KERNEL_PMD)
-+              return;
-+
-       spin_lock_irqsave(&pgd_lock, flags);
-       pgd_list_del(pgd);
-       spin_unlock_irqrestore(&pgd_lock, flags);
-@@ -238,6 +244,24 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
+@@ -238,6 +239,30 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
                        goto out_oom;
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
 +
 +      if (!HAVE_SHARED_KERNEL_PMD) {
 +              unsigned long flags;
-+              pgd_t *copy_pgd = pgd_offset_k(PAGE_OFFSET);
-+              pud_t *copy_pud = pud_offset(copy_pgd, PAGE_OFFSET);
-+              pmd_t *copy_pmd = pmd_offset(copy_pud, PAGE_OFFSET);
-+              pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
-+                ++i;
-+              if (!pmd)
-+                      goto out_oom;
++
++              for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++                      pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++                      if (!pmd)
++                              goto out_oom;
++                      set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++              }
 +
 +              spin_lock_irqsave(&pgd_lock, flags);
-+              memcpy(pmd, copy_pmd, PAGE_SIZE);
-+              set_pgd(&pgd[USER_PTRS_PER_PGD], __pgd(1 + __pa(pmd)));
++              for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++                      unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++                      pgd_t *kpgd = pgd_offset_k(v);
++                      pud_t *kpud = pud_offset(kpgd, v);
++                      pmd_t *kpmd = pmd_offset(kpud, v);
++                      pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++                      memcpy(pmd, kpmd, PAGE_SIZE);
++              }
 +              pgd_list_add(pgd);
 +              spin_unlock_irqrestore(&pgd_lock, flags);
 +      }
@@ -84,7 +77,7 @@
        return pgd;
  
  out_oom:
-@@ -252,9 +276,21 @@ void pgd_free(pgd_t *pgd)
+@@ -252,9 +277,23 @@ void pgd_free(pgd_t *pgd)
        int i;
  
        /* in the PAE case user pgd entries are overwritten before usage */
@@ -98,12 +91,14 @@
 +              }
 +              if (!HAVE_SHARED_KERNEL_PMD) {
 +                      unsigned long flags;
-+                      pmd_t *pmd = (void *)__va(pgd_val(pgd[USER_PTRS_PER_PGD])-1);
 +                      spin_lock_irqsave(&pgd_lock, flags);
 +                      pgd_list_del(pgd);
 +                      spin_unlock_irqrestore(&pgd_lock, flags);
-+                      memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
-+                      kmem_cache_free(pmd_cache, pmd);
++                      for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++                              pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++                              memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++                              kmem_cache_free(pmd_cache, pmd);
++                      }
 +              }
 +      }
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
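Both versions of the change rely on the same entry encoding: a kernel pgd
slot holds __pa(pmd) + 1, with the low bit acting as the present flag, so
__va(pgd_val(pgd[i]) - 1) recovers the pmd's virtual address. A two-line
sketch, assuming the usual helpers:

    pgd_t e = __pgd(1 + __pa(pmd));               /* store: phys addr | present */
    pmd_t *back = (pmd_t *)__va(pgd_val(e) - 1);  /* load: strip flag, map back */
    /* back == pmd */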
diff -r b67f9f21fd9c -r f37f1c9ec2ec tools/libxc/xc_linux_save.c
--- a/tools/libxc/xc_linux_save.c       Sun Nov 27 01:06:20 2005
+++ b/tools/libxc/xc_linux_save.c       Sun Nov 27 01:06:44 2005
@@ -457,6 +457,15 @@
             xen_start = (hvirt_start >> L2_PAGETABLE_SHIFT_PAE) & 0x1ff; 
     }
 
+    if (pt_levels == 4 && type == L4TAB) { 
+        /*
+        ** XXX SMH: should compute these from hvirt_start (which we have) 
+        ** and hvirt_end (which we don't) 
+        */
+        xen_start = 256; 
+        xen_end   = 272; 
+    }
+
     /* Now iterate through the page table, canonicalizing each PTE */
     for (i = 0; i < pte_last; i++ ) {
 
@@ -721,12 +730,6 @@
     }
 
     /* Domain is still running at this point */
-
-    if (live && (pt_levels == 4)) {
-        ERR("Live migration not supported for 64-bit guests");
-        live = 0;
-    }
-
     if (live) {
 
         if (xc_shadow_control(xc_handle, dom, 
@@ -811,7 +814,7 @@
         for (i = 0; i < max_pfn; i++) {
 
             mfn = live_p2m[i];
-            if((mfn != 0xffffffffUL) && (mfn_to_pfn(mfn) != i)) { 
+            if((mfn != INVALID_P2M_ENTRY) && (mfn_to_pfn(mfn) != i)) { 
                 DPRINTF("i=0x%x mfn=%lx live_m2p=%lx\n", i, 
                         mfn, mfn_to_pfn(mfn));
                 err++;
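The xc_linux_save.c hunks enable live save for 64-bit (4-level) guests: the
blanket "Live migration not supported for 64-bit guests" bail-out is
removed, the p2m sanity check compares against INVALID_P2M_ENTRY instead of
a hardcoded 32-bit 0xffffffffUL, and L4 canonicalization skips the slots
that map Xen's reserved range. The 256/272 window is hardcoded; as the XXX
comment notes, it should be derived from hvirt_start and hvirt_end. A
sketch of that derivation, assuming the conventional x86-64 Xen hole of
0xFFFF800000000000 to 0xFFFF880000000000 (an assumption, not taken from
this patch):

    #define L4_PAGETABLE_SHIFT 39                      /* one L4 slot = 512GB */
    unsigned long hvirt_start = 0xFFFF800000000000UL;  /* assumed layout */
    unsigned long hvirt_end   = 0xFFFF880000000000UL;  /* assumed layout */

    int xen_start = (hvirt_start >> L4_PAGETABLE_SHIFT) & 0x1ff;  /* == 256 */
    int xen_end   = (hvirt_end   >> L4_PAGETABLE_SHIFT) & 0x1ff;  /* == 272 */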
diff -r b67f9f21fd9c -r f37f1c9ec2ec tools/python/xen/xend/XendCheckpoint.py
--- a/tools/python/xen/xend/XendCheckpoint.py   Sun Nov 27 01:06:20 2005
+++ b/tools/python/xen/xend/XendCheckpoint.py   Sun Nov 27 01:06:44 2005
@@ -128,7 +128,7 @@
     try:
         l = read_exact(fd, sizeof_unsigned_long,
                        "not a valid guest state file: pfn count read")
-        nr_pfns = unpack("=L", l)[0]   # XXX endianess
+        nr_pfns = unpack("L", l)[0]    # native sizeof long
         if nr_pfns > 16*1024*1024:     # XXX 
             raise XendError(
                 "not a valid guest state file: pfn count out of range")
diff -r b67f9f21fd9c -r f37f1c9ec2ec xen/arch/x86/x86_64/mm.c
--- a/xen/arch/x86/x86_64/mm.c  Sun Nov 27 01:06:20 2005
+++ b/xen/arch/x86/x86_64/mm.c  Sun Nov 27 01:06:44 2005
@@ -190,7 +190,9 @@
         if ( copy_from_user(&xmml, arg, sizeof(xmml)) )
             return -EFAULT;
 
-        for ( v = RDWR_MPT_VIRT_START; v != RDWR_MPT_VIRT_END; v += 1 << 21 )
+        for ( i = 0, v = RDWR_MPT_VIRT_START;
+              (i != xmml.max_extents) && (v != RDWR_MPT_VIRT_END);
+              i++, v += 1 << 21 )
         {
             l3e = l4e_to_l3e(idle_pg_table[l4_table_offset(v)])[
                 l3_table_offset(v)];
@@ -200,11 +202,8 @@
             if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
                 break;
             mfn = l2e_get_pfn(l2e) + l1_table_offset(v);
-            if ( i == xmml.max_extents )
-                break;
             if ( put_user(mfn, &xmml.extent_start[i]) )
                 return -EFAULT;
-            i++;
         }
 
         if ( put_user(i, &((struct xen_machphys_mfn_list *)arg)->nr_extents) )
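The x86_64/mm.c rework folds the xmml.max_extents bound into the loop
condition, so the extent counter i and the virtual-address cursor v advance
together and put_user can never run past the caller's buffer. The shape of
the pattern, with start, end, STEP, and mfn_of standing in as hypothetical
names:

    for ( i = 0, v = start;
          (i != max_extents) && (v != end);
          i++, v += STEP )
    {
        if ( put_user(mfn_of(v), &extent_start[i]) )
            return -EFAULT;
    }
    /* i is the number of extents actually written back */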
