
[Xen-changelog] [xen-unstable] merge with xen-unstable.hg



# HG changeset patch
# User awilliam@xxxxxxxxxxxx
# Node ID c0d41ac21486b69a3b81ac5692c8e6ae0592a789
# Parent  e948333c2c3865e617e9f5fb1098b0914388734e
# Parent  37141c3a3d39956ad5faf2d4e2a91276eaca557b
merge with xen-unstable.hg
---
 linux-2.6-xen-sparse/drivers/xen/netback/netback.c |    5 
 tools/libxc/xc_linux_build.c                       |  176 ++++++++-------------
 xen/arch/x86/mm.c                                  |   21 --
 3 files changed, 74 insertions(+), 128 deletions(-)

diff -r e948333c2c38 -r c0d41ac21486 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Tue Dec 12 15:23:51 2006 -0700
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Tue Dec 12 15:25:02 2006 -0700
@@ -183,7 +183,7 @@ static struct sk_buff *netbk_copy_skb(st
 
        BUG_ON(skb_shinfo(skb)->frag_list != NULL);
 
-       nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC);
+       nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!nskb))
                goto err;
 
@@ -1224,7 +1224,8 @@ static void net_tx_action(unsigned long 
                            ret < MAX_SKB_FRAGS) ?
                        PKT_PROT_LEN : txreq.size;
 
-               skb = alloc_skb(data_len + 16 + NET_IP_ALIGN, GFP_ATOMIC);
+               skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
+                               GFP_ATOMIC | __GFP_NOWARN);
                if (unlikely(skb == NULL)) {
                        DPRINTK("Can't allocate a skb in start_xmit.\n");
                        netbk_tx_err(netif, &txreq, i);
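
Both netback.c hunks add __GFP_NOWARN to GFP_ATOMIC skb allocations. Atomic
allocations fail readily under memory pressure, and both call sites already
recover gracefully (the copy path bails out to err, the transmit path fails
the request back with netbk_tx_err), so the kernel's allocation-failure
backtrace is just log noise. A minimal user-space sketch of the idea follows;
try_alloc() is a hypothetical stand-in, not a kernel API.

    /* Sketch: an allocation that may fail, whose failure is expected
     * and handled -- the case __GFP_NOWARN exists to keep quiet. */
    #include <stdlib.h>

    static void *try_alloc(size_t len)
    {
        return malloc(len);          /* may fail; failure is expected */
    }

    int main(void)
    {
        void *skb = try_alloc(2048);
        if (skb == NULL)
            return 0;                /* netbk_tx_err() path: recover quietly */
        free(skb);
        return 0;
    }
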
diff -r e948333c2c38 -r c0d41ac21486 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Tue Dec 12 15:23:51 2006 -0700
+++ b/tools/libxc/xc_linux_build.c      Tue Dec 12 15:25:02 2006 -0700
@@ -175,11 +175,10 @@ static int load_initrd(int xc_handle, do
     return 0;
 }
 
-#define alloc_pt(ltab, vltab, pltab)                                    \
+#define alloc_pt(ltab, vltab)                                           \
 do {                                                                    \
-    pltab = ppt_alloc++;                                                \
-    ltab = (uint64_t)page_array[pltab] << PAGE_SHIFT;                   \
-    pltab <<= PAGE_SHIFT;                                               \
+    ltab = ppt_alloc++;                                                 \
+    ltab = (uint64_t)page_array[ltab] << PAGE_SHIFT;                    \
     if ( vltab != NULL )                                                \
         munmap(vltab, PAGE_SIZE);                                       \
     if ( (vltab = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,       \
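
The reworked alloc_pt() drops its third parameter: callers no longer need the
pseudo-physical address (pltab) separately, so the macro reuses a single
variable, holding the pseudo-physical frame number from ppt_alloc on the
first line and the machine byte address looked up through page_array on the
second. A user-space model of that two-step assignment, assuming the identity
gpfn-to-gmfn map the patch installs later (names here are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static uint64_t page_array[16];  /* pfn -> mfn; identity here */
    static unsigned long ppt_alloc;  /* next free pfn in the pt area */

    /* Model of the one-variable macro body: ltab is a frame number on
     * the first line, a machine byte address on the second. */
    static uint64_t alloc_pt_model(void)
    {
        uint64_t ltab = ppt_alloc++;
        ltab = page_array[ltab] << PAGE_SHIFT;
        return ltab;
    }

    int main(void)
    {
        for (unsigned i = 0; i < 16; i++)
            page_array[i] = i;       /* identity gpfn->gmfn map */
        printf("pt0 at %#llx, pt1 at %#llx\n",
               (unsigned long long)alloc_pt_model(),
               (unsigned long long)alloc_pt_model());
        return 0;
    }
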
@@ -202,43 +201,32 @@ static int setup_pg_tables(int xc_handle
 {
     l1_pgentry_t *vl1tab=NULL, *vl1e=NULL;
     l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
-    unsigned long l1tab = 0, pl1tab;
-    unsigned long l2tab = 0, pl2tab;
+    unsigned long l1tab = 0;
+    unsigned long l2tab = 0;
     unsigned long ppt_alloc;
     unsigned long count;
 
     ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
-    alloc_pt(l2tab, vl2tab, pl2tab);
+    alloc_pt(l2tab, vl2tab);
     vl2e = &vl2tab[l2_table_offset(dsi_v_start)];
-    if (shadow_mode_enabled)
-        ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl2tab >> PAGE_SHIFT);
-    else
-        ctxt->ctrlreg[3] = xen_pfn_to_cr3(l2tab >> PAGE_SHIFT);
+    ctxt->ctrlreg[3] = xen_pfn_to_cr3(l2tab >> PAGE_SHIFT);
 
     for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++ )
     {
         if ( ((unsigned long)vl1e & (PAGE_SIZE-1)) == 0 )
         {
-            alloc_pt(l1tab, vl1tab, pl1tab);
+            alloc_pt(l1tab, vl1tab);
             vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
-            if (shadow_mode_enabled)
-                *vl2e = pl1tab | L2_PROT;
-            else
-                *vl2e = l1tab | L2_PROT;
-            vl2e++;
-        }
-
-        if ( shadow_mode_enabled )
-        {
-            *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        }
-        else
-        {
-            *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+            *vl2e++ = l1tab | L2_PROT;
+        }
+
+        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+
+        if ( !shadow_mode_enabled )
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
                 *vl1e &= ~_PAGE_RW;
-        }
+
         vl1e++;
     }
     munmap(vl1tab, PAGE_SIZE);
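
With shadow-translate mode now enabled before the tables are built (see the
setup_guest() hunk further down), the L1 entries can be written from
page_array in both modes, and the old shadow/non-shadow branches collapse
into one store. Only the write-protection of the page-table region itself
remains gated on !shadow_mode_enabled. The allocate-on-page-boundary pattern
driving the loop can be modelled in a few lines; the constants and names
below are illustrative non-PAE x86 values, not the builder's actual types:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE  4096
    #define L1_ENTRIES 1024   /* PAGE_SIZE / sizeof(entry) on non-PAE x86 */

    int main(void)
    {
        static uint32_t l1[2][L1_ENTRIES] __attribute__((aligned(PAGE_SIZE)));
        uint32_t *vl1e = NULL;
        unsigned table = 0, allocs = 0;

        for (unsigned count = 0; count < 2 * L1_ENTRIES; count++) {
            /* Same test as the loop above: a page-aligned vl1e means
             * the current L1 is full (or absent), so get a fresh one. */
            if (((uintptr_t)vl1e & (PAGE_SIZE - 1)) == 0) {
                vl1e = l1[table++];          /* stands in for alloc_pt() */
                allocs++;
            }
            *vl1e++ = (count << 12) | 0x7;   /* frame | L1_PROT-style bits */
        }
        printf("%u entries, %u L1 tables allocated\n",
               2 * L1_ENTRIES, allocs);
        return 0;
    }
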
@@ -266,7 +254,7 @@ static int setup_pg_tables_pae(int xc_ha
     l1_pgentry_64_t *vl1tab = NULL, *vl1e = NULL;
     l2_pgentry_64_t *vl2tab = NULL, *vl2e = NULL;
     l3_pgentry_64_t *vl3tab = NULL, *vl3e = NULL;
-    uint64_t l1tab, l2tab, l3tab, pl1tab, pl2tab, pl3tab;
+    uint64_t l1tab, l2tab, l3tab;
     unsigned long ppt_alloc, count, nmfn;
 
     /* First allocate page for page dir. */
@@ -287,12 +275,9 @@ static int setup_pg_tables_pae(int xc_ha
         page_array[ppt_alloc] = nmfn;
     }
 
-    alloc_pt(l3tab, vl3tab, pl3tab);
+    alloc_pt(l3tab, vl3tab);
     vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
-    if (shadow_mode_enabled)
-        ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl3tab >> PAGE_SHIFT);
-    else
-        ctxt->ctrlreg[3] = xen_pfn_to_cr3(l3tab >> PAGE_SHIFT);
+    ctxt->ctrlreg[3] = xen_pfn_to_cr3(l3tab >> PAGE_SHIFT);
 
     for ( count = 0; count < ((v_end - dsi_v_start) >> PAGE_SHIFT); count++)
     {
@@ -300,42 +285,33 @@ static int setup_pg_tables_pae(int xc_ha
         {
             if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
             {
-                alloc_pt(l2tab, vl2tab, pl2tab);
+                alloc_pt(l2tab, vl2tab);
                 vl2e = &vl2tab[l2_table_offset_pae(
                     dsi_v_start + (count << PAGE_SHIFT))];
-                if (shadow_mode_enabled)
-                    *vl3e = pl2tab | L3_PROT;
-                else
-                    *vl3e++ = l2tab | L3_PROT;
+                *vl3e++ = l2tab | L3_PROT;
             }
 
-            alloc_pt(l1tab, vl1tab, pl1tab);
+            alloc_pt(l1tab, vl1tab);
             vl1e = &vl1tab[l1_table_offset_pae(
                 dsi_v_start + (count << PAGE_SHIFT))];
-            if (shadow_mode_enabled)
-                *vl2e = pl1tab | L2_PROT;
-            else
-                *vl2e++ = l1tab | L2_PROT;
-        }
-
-        if ( shadow_mode_enabled )
-        {
-            *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        }
-        else
-        {
-            *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
+            *vl2e++ = l1tab | L2_PROT;
+
+        }
+
+        *vl1e = ((uint64_t)page_array[count] << PAGE_SHIFT) | L1_PROT;
+
+        if ( !shadow_mode_enabled )
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
                 *vl1e &= ~_PAGE_RW;
-        }
+
         vl1e++;
     }
 
     /* Xen requires a mid-level pgdir mapping 0xC0000000 region. */
     if ( (vl3tab[3] & _PAGE_PRESENT) == 0 )
     {
-        alloc_pt(l2tab, vl2tab, pl2tab);
+        alloc_pt(l2tab, vl2tab);
         vl3tab[3] = l2tab | L3_PROT;
     }
 
@@ -371,69 +347,48 @@ static int setup_pg_tables_64(int xc_han
     l2_pgentry_t *vl2tab=NULL, *vl2e=NULL;
     l3_pgentry_t *vl3tab=NULL, *vl3e=NULL;
     l4_pgentry_t *vl4tab=NULL, *vl4e=NULL;
-    unsigned long l2tab = 0, pl2tab;
-    unsigned long l1tab = 0, pl1tab;
-    unsigned long l3tab = 0, pl3tab;
-    unsigned long l4tab = 0, pl4tab;
+    unsigned long l2tab = 0;
+    unsigned long l1tab = 0;
+    unsigned long l3tab = 0;
+    unsigned long l4tab = 0;
     unsigned long ppt_alloc;
     unsigned long count;
 
     /* First allocate page for page dir. */
     ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
-    alloc_pt(l4tab, vl4tab, pl4tab);
+    alloc_pt(l4tab, vl4tab);
     vl4e = &vl4tab[l4_table_offset(dsi_v_start)];
-    if (shadow_mode_enabled)
-        ctxt->ctrlreg[3] = xen_pfn_to_cr3(pl4tab >> PAGE_SHIFT);
-    else
-        ctxt->ctrlreg[3] = xen_pfn_to_cr3(l4tab >> PAGE_SHIFT);
+    ctxt->ctrlreg[3] = xen_pfn_to_cr3(l4tab >> PAGE_SHIFT);
 
     for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
     {
         if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
         {
-            alloc_pt(l1tab, vl1tab, pl1tab);
+            alloc_pt(l1tab, vl1tab);
 
             if ( !((unsigned long)vl2e & (PAGE_SIZE-1)) )
             {
-                alloc_pt(l2tab, vl2tab, pl2tab);
+                alloc_pt(l2tab, vl2tab);
                 if ( !((unsigned long)vl3e & (PAGE_SIZE-1)) )
                 {
-                    alloc_pt(l3tab, vl3tab, pl3tab);
+                    alloc_pt(l3tab, vl3tab);
                     vl3e = &vl3tab[l3_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
-                    if (shadow_mode_enabled)
-                        *vl4e = pl3tab | L4_PROT;
-                    else
-                        *vl4e = l3tab | L4_PROT;
-                    vl4e++;
+                    *vl4e++ = l3tab | L4_PROT;
                 }
                 vl2e = &vl2tab[l2_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
-                if (shadow_mode_enabled)
-                    *vl3e = pl2tab | L3_PROT;
-                else
-                    *vl3e = l2tab | L3_PROT;
-                vl3e++;
+                *vl3e++ = l2tab | L3_PROT;
             }
             vl1e = &vl1tab[l1_table_offset(dsi_v_start + (count<<PAGE_SHIFT))];
-            if (shadow_mode_enabled)
-                *vl2e = pl1tab | L2_PROT;
-            else
-                *vl2e = l1tab | L2_PROT;
-            vl2e++;
-        }
-
-        if ( shadow_mode_enabled )
-        {
-            *vl1e = (count << PAGE_SHIFT) | L1_PROT;
-        }
-        else
-        {
-            *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+            *vl2e++ = l1tab | L2_PROT;
+        }
+
+        *vl1e = (page_array[count] << PAGE_SHIFT) | L1_PROT;
+
+        if ( !shadow_mode_enabled )
             if ( (count >= ((vpt_start-dsi_v_start)>>PAGE_SHIFT)) &&
                  (count <  ((vpt_end  -dsi_v_start)>>PAGE_SHIFT)) )
-                {
-                    *vl1e &= ~_PAGE_RW;
-                }
-        }
+                *vl1e &= ~_PAGE_RW;
+
         vl1e++;
     }
 
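
The 64-bit variant applies the same collapse at every level: whenever an
entry pointer crosses a page boundary, a fresh lower-level table is allocated
and linked into its parent with one post-increment store. The
l?_table_offset() helpers select 9 bits of the virtual address per level
above the 12-bit page offset; a small sketch of that index math (the example
address is arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* x86-64 4-level table indexing: 9 bits per level above the
     * 12-bit page offset, i.e. the l?_table_offset() helpers. */
    #define PAGE_SHIFT 12
    #define TABLE_BITS 9
    #define OFF(va, lvl) \
        (((va) >> (PAGE_SHIFT + TABLE_BITS * ((lvl) - 1))) & 0x1ff)

    int main(void)
    {
        uint64_t va = 0xffff800000200000ULL;   /* arbitrary example VA */
        printf("l4=%llu l3=%llu l2=%llu l1=%llu\n",
               (unsigned long long)OFF(va, 4),
               (unsigned long long)OFF(va, 3),
               (unsigned long long)OFF(va, 2),
               (unsigned long long)OFF(va, 1));
        return 0;
    }
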
@@ -770,6 +725,27 @@ static int setup_guest(int xc_handle,
     {
         PERROR("Could not allocate memory for PV guest.\n");
         goto error_out;
+    }
+
+
+    if ( shadow_mode_enabled )
+    {
+        /*
+         * Enable shadow translate mode. This must happen after
+         * populate physmap because the p2m reservation is based on
+         * the domain's current memory allocation.
+         */
+        if ( xc_shadow_control(xc_handle, dom,
+                           XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
+                           NULL, 0, NULL, 0, NULL) < 0 )
+        {
+            PERROR("Could not enable translation mode");
+            goto error_out;
+        }
+
+        /* Reinitialise the gpfn->gmfn array. */
+        for ( i = 0; i < nr_pages; i++ )
+            page_array[i] = i;
     }
 
     rc = (load_funcs.loadimage)(image, image_size,
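
This hunk moves the XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE call, formerly
issued after the image was loaded (removed in the next hunk), to directly
after the physmap is populated: as the new comment notes, the hypervisor
sizes the p2m reservation from the domain's current allocation, so the
memory must exist first. Once translate mode is on, the tools address the
guest by gpfn and page_array becomes the identity map, which is what allowed
the pl?tab plumbing above to go away. A hedged sketch of the ordering, with
stand-in names rather than the real libxc signatures:

    #include <stdio.h>

    /* Stand-ins for the libxc calls; names and signatures are
     * illustrative, not the real API. */
    static int populate_physmap(unsigned long n) { (void)n; return 0; }
    static int enable_translate(void) { return 0; } /* xc_shadow_control() */

    int main(void)
    {
        unsigned long nr_pages = 8, page_array[8];

        if (populate_physmap(nr_pages) < 0)    /* 1: allocate guest memory */
            return 1;
        if (enable_translate() < 0) {          /* 2: p2m sized from step 1 */
            fprintf(stderr, "Could not enable translation mode\n");
            return 1;
        }
        for (unsigned long i = 0; i < nr_pages; i++)
            page_array[i] = i;                 /* 3: tools now speak gpfns */
        (void)page_array;
        return 0;
    }
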
@@ -972,15 +948,6 @@ static int setup_guest(int xc_handle,
     if ( shadow_mode_enabled )
     {
         struct xen_add_to_physmap xatp;
-
-        /* Enable shadow translate mode */
-        if ( xc_shadow_control(xc_handle, dom,
-                               XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE,
-                               NULL, 0, NULL, 0, NULL) < 0 )
-        {
-            PERROR("Could not enable translation mode");
-            goto error_out;
-        }
 
         guest_shared_info_mfn = (vsharedinfo_start-dsi.v_start) >> PAGE_SHIFT;
 
@@ -1083,8 +1050,7 @@ static int setup_guest(int xc_handle,
         if ( pfn >= nr_pages )
             goto error_out;
         domctl.domain = (domid_t)dom;
-        domctl.u.hypercall_init.gmfn   = shadow_mode_enabled ?
-            pfn : page_array[pfn];
+        domctl.u.hypercall_init.gmfn = page_array[pfn];
         domctl.cmd = XEN_DOMCTL_hypercall_init;
         if ( xc_domctl(xc_handle, &domctl) )
             goto error_out;
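
With page_array reset to the identity map as soon as translate mode is
enabled, page_array[pfn] == pfn for a shadow-translate guest, so the
conditional removed here had become a no-op and the plain lookup now serves
both modes. Trivial to check:

    #include <assert.h>

    int main(void)
    {
        unsigned long page_array[8], pfn = 5;

        for (unsigned long i = 0; i < 8; i++)
            page_array[i] = i;             /* identity p2m after the reset */
        assert(page_array[pfn] == pfn);    /* shadow and non-shadow cases */
        return 0;                          /* collapse to one expression  */
    }
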
diff -r e948333c2c38 -r c0d41ac21486 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Tue Dec 12 15:23:51 2006 -0700
+++ b/xen/arch/x86/mm.c Tue Dec 12 15:25:02 2006 -0700
@@ -435,15 +435,6 @@ int map_ldt_shadow_page(unsigned int off
         return 0;
 
     okay = get_page_and_type(mfn_to_page(mfn), d, PGT_ldt_page);
-
-    if ( !okay && unlikely(shadow_mode_refcounts(d)) )
-    {
-        shadow_lock(d);
-        shadow_remove_write_access(d->vcpu[0], _mfn(mfn), 0, 0);
-        okay = get_page_and_type(mfn_to_page(mfn), d, PGT_ldt_page);
-        shadow_unlock(d);
-    }
-
     if ( unlikely(!okay) )
         return 0;
 
@@ -508,8 +499,6 @@ get_linear_pagetable(
     unsigned long x, y;
     struct page_info *page;
     unsigned long pfn;
-
-    ASSERT( !shadow_mode_refcounts(d) );
 
     if ( (root_get_flags(re) & _PAGE_RW) )
     {
@@ -821,8 +810,6 @@ static int alloc_l1_table(struct page_in
     l1_pgentry_t  *pl1e;
     int            i;
 
-    ASSERT(!shadow_mode_refcounts(d));
-
     pl1e = map_domain_page(pfn);
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
@@ -873,8 +860,6 @@ static int create_pae_xen_mappings(l3_pg
      *  2. Cannot appear in another page table's L3:
      *     a. alloc_l3_table() calls this function and this check will fail
      *     b. mod_l3_entry() disallows updates to slot 3 in an existing table
-     *
-     * XXX -- this needs revisiting for shadow_mode_refcount()==true...
      */
     page = l3e_get_page(l3e3);
     BUG_ON(page->u.inuse.type_info & PGT_pinned);
@@ -959,8 +944,6 @@ static int alloc_l2_table(struct page_in
     l2_pgentry_t  *pl2e;
     int            i;
 
-    ASSERT(!shadow_mode_refcounts(d));
-    
     pl2e = map_domain_page(pfn);
 
     for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
@@ -1008,8 +991,6 @@ static int alloc_l3_table(struct page_in
     l3_pgentry_t  *pl3e;
     int            i;
 
-    ASSERT(!shadow_mode_refcounts(d));
-
 #ifdef CONFIG_X86_PAE
     /*
      * PAE pgdirs above 4GB are unacceptable if the guest does not understand
@@ -1074,8 +1055,6 @@ static int alloc_l4_table(struct page_in
     unsigned long  pfn = page_to_mfn(page);
     l4_pgentry_t  *pl4e = page_to_virt(page);
     int            i;
-
-    ASSERT(!shadow_mode_refcounts(d));
 
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
     {
