
[Xen-changelog] [linux-2.6.18-xen] Merge with PPC Xen tree.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Date 1184663988 -3600
# Node ID 3ce2b9fc4900103af7b4f180ff6622b480d55c84
# Parent  14c48f11619e4a1c8217cd029fece60e8a46798a
# Parent  41918416db51d1eeaba7c71259e1c0f0ea3426f6
Merge with PPC Xen tree.
---
 arch/i386/mm/fault-xen.c        |   33 +++++++++++++++++-------
 drivers/xen/core/gnttab.c       |    4 +--
 drivers/xen/netfront/netfront.c |   53 +++++++++++++++++++++++++++++++++-------
 3 files changed, 70 insertions(+), 20 deletions(-)

diff -r 14c48f11619e -r 3ce2b9fc4900 arch/i386/mm/fault-xen.c
--- a/arch/i386/mm/fault-xen.c  Fri Jul 13 17:14:12 2007 -0500
+++ b/arch/i386/mm/fault-xen.c  Tue Jul 17 10:19:48 2007 +0100
@@ -739,18 +739,31 @@ void vmalloc_sync_all(void)
         * problematic: insync can only get set bits added, and updates to
         * start are only improving performance (without affecting correctness
         * if undone).
-        */
-       static DECLARE_BITMAP(insync, PTRS_PER_PGD);
+        * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
+        *      This change works just fine with 2-level paging too.
+        */
+#define sync_index(a) ((a) >> PMD_SHIFT)
+       static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
        static unsigned long start = TASK_SIZE;
        unsigned long address;
 
        BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-       for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-               if (!test_bit(pgd_index(address), insync)) {
+       for (address = start;
+            address >= TASK_SIZE && address < hypervisor_virt_start;
+            address += 1UL << PMD_SHIFT) {
+               if (!test_bit(sync_index(address), insync)) {
                        unsigned long flags;
                        struct page *page;
 
                        spin_lock_irqsave(&pgd_lock, flags);
+                       /*
+                        * XEN: vmalloc_sync_one() failure path logic assumes
+                        * pgd_list is non-empty.
+                        */
+                       if (unlikely(!pgd_list)) {
+                               spin_unlock_irqrestore(&pgd_lock, flags);
+                               return;
+                       }
                        for (page = pgd_list; page; page =
                                        (struct page *)page->index)
                                if (!vmalloc_sync_one(page_address(page),
@@ -760,10 +773,10 @@ void vmalloc_sync_all(void)
                                }
                        spin_unlock_irqrestore(&pgd_lock, flags);
                        if (!page)
-                               set_bit(pgd_index(address), insync);
+                               set_bit(sync_index(address), insync);
                }
-               if (address == start && test_bit(pgd_index(address), insync))
-                       start = address + PGDIR_SIZE;
-       }
-}
-#endif
+               if (address == start && test_bit(sync_index(address), insync))
+                       start = address + (1UL << PMD_SHIFT);
+       }
+}
+#endif
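
The PAE fix above hinges on one idea: the "insync" bitmap must track one
bit per PMD-sized region instead of one per PGD entry, because a PAE top
level has only four 1GB entries, which is far too coarse for syncing
individual PMDs. A minimal userspace sketch of the index arithmetic,
using the usual i386 constants (an assumption here, not something the
patch defines), might look like:

    #include <stdio.h>

    /* Assumed i386 PAE paging constants (not part of the patch):
     * PMD_SHIFT = 21, PTRS_PER_PGD = 4, PTRS_PER_PMD = 512.
     * With 2-level paging: PMD_SHIFT = 22, PTRS_PER_PGD = 1024,
     * PTRS_PER_PMD = 1, and the same arithmetic still works. */
    #define PMD_SHIFT       21
    #define PTRS_PER_PGD    4
    #define PTRS_PER_PMD    512

    #define sync_index(a)   ((a) >> PMD_SHIFT)

    int main(void)
    {
            unsigned long addr = 0xc1234567UL; /* a kernel-space address */

            /* PTRS_PER_PGD * PTRS_PER_PMD bits, one per 2MB granule,
             * cover the full 4GB address space. */
            printf("bitmap bits: %d\n", PTRS_PER_PGD * PTRS_PER_PMD);
            printf("sync_index(%#lx) = %lu\n", addr, sync_index(addr));
            return 0;
    }

Sizing the bitmap as PTRS_PER_PGD*PTRS_PER_PMD collapses back to the old
PTRS_PER_PGD size whenever PTRS_PER_PMD is 1, which is why the change is
safe for 2-level paging too.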
diff -r 14c48f11619e -r 3ce2b9fc4900 drivers/xen/core/gnttab.c
--- a/drivers/xen/core/gnttab.c Fri Jul 13 17:14:12 2007 -0500
+++ b/drivers/xen/core/gnttab.c Tue Jul 17 10:19:48 2007 +0100
@@ -184,7 +184,7 @@ int gnttab_end_foreign_access_ref(grant_
        nflags = shared[ref].flags;
        do {
                if ((flags = nflags) & (GTF_reading|GTF_writing)) {
-                       printk(KERN_ALERT "WARNING: g.e. still in use!\n");
+                       printk(KERN_DEBUG "WARNING: g.e. still in use!\n");
                        return 0;
                }
        } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
@@ -204,7 +204,7 @@ void gnttab_end_foreign_access(grant_ref
        } else {
                /* XXX This needs to be fixed so that the ref and page are
                   placed on a list to be freed up later. */
-               printk(KERN_WARNING
+               printk(KERN_DEBUG
                       "WARNING: leaking g.e. and page still in use!\n");
        }
 }
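
For background on the two messages being demoted: the function uses a
lock-free pattern, reading the grant's flags, bailing out while the
remote domain still holds GTF_reading/GTF_writing pins, and otherwise
clearing the flags with a compare-and-swap, retrying on a race. A
stand-alone C11 sketch of that pattern (the flag bits and names here are
illustrative, not the real grant-table ABI) could look like:

    #include <stdatomic.h>
    #include <stdio.h>

    #define GTF_reading (1u << 3)   /* illustrative bits only */
    #define GTF_writing (1u << 4)

    /* Returns 1 if the grant was revoked, 0 if still in use. */
    static int end_foreign_access_ref(_Atomic unsigned *flags)
    {
            unsigned expected = atomic_load(flags);

            for (;;) {
                    if (expected & (GTF_reading | GTF_writing))
                            return 0; /* remote end still maps the page */
                    /* On failure 'expected' is refreshed with the
                     * current value and we re-check the pins. */
                    if (atomic_compare_exchange_weak(flags, &expected, 0))
                            return 1;
            }
    }

    int main(void)
    {
            _Atomic unsigned flags = 1; /* granted, not currently mapped */
            printf("revoked: %d\n", end_foreign_access_ref(&flags));
            return 0;
    }

Lowering the log level to KERN_DEBUG fits the netfront change below:
the new copy-receiver teardown path deliberately tolerates refs that are
still in use, so a loud per-ref warning would presumably just be noise.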
diff -r 14c48f11619e -r 3ce2b9fc4900 drivers/xen/netfront/netfront.c
--- a/drivers/xen/netfront/netfront.c   Fri Jul 13 17:14:12 2007 -0500
+++ b/drivers/xen/netfront/netfront.c   Tue Jul 17 10:19:48 2007 +0100
@@ -1536,7 +1536,7 @@ static void netif_release_tx_bufs(struct
        }
 }
 
-static void netif_release_rx_bufs(struct netfront_info *np)
+static void netif_release_rx_bufs_flip(struct netfront_info *np)
 {
        struct mmu_update      *mmu = np->rx_mmu;
        struct multicall_entry *mcl = np->rx_mcl;
@@ -1545,11 +1545,6 @@ static void netif_release_rx_bufs(struct
        unsigned long mfn;
        int xfer = 0, noxfer = 0, unused = 0;
        int id, ref, rc;
-
-       if (np->copying_receiver) {
-               WPRINTK("%s: fix me for copying receiver.\n", __FUNCTION__);
-               return;
-       }
 
        skb_queue_head_init(&free_list);
 
@@ -1597,7 +1592,7 @@ static void netif_release_rx_bufs(struct
                xfer++;
        }
 
-       IPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
+       DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
                __FUNCTION__, xfer, noxfer, unused);
 
        if (xfer) {
@@ -1624,6 +1619,45 @@ static void netif_release_rx_bufs(struct
        spin_unlock_bh(&np->rx_lock);
 }
 
+static void netif_release_rx_bufs_copy(struct netfront_info *np)
+{
+       struct sk_buff *skb;
+       int i, ref;
+       int busy = 0, inuse = 0;
+
+       spin_lock_bh(&np->rx_lock);
+
+       for (i = 0; i < NET_RX_RING_SIZE; i++) {
+               ref = np->grant_rx_ref[i];
+
+               if (ref == GRANT_INVALID_REF)
+                       continue;
+
+               inuse++;
+
+               skb = np->rx_skbs[i];
+
+               if (!gnttab_end_foreign_access_ref(ref, 0))
+               {
+                       busy++;
+                       continue;
+               }
+
+               gnttab_release_grant_reference(&np->gref_rx_head, ref);
+               np->grant_rx_ref[i] = GRANT_INVALID_REF;
+               add_id_to_freelist(np->rx_skbs, i);
+
+               skb_shinfo(skb)->nr_frags = 0;
+               dev_kfree_skb(skb);
+       }
+
+       if (busy)
+               DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
+                       __FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
+
+       spin_unlock_bh(&np->rx_lock);
+}
+
 static int network_close(struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
@@ -1817,7 +1851,10 @@ static void netif_uninit(struct net_devi
 {
        struct netfront_info *np = netdev_priv(dev);
        netif_release_tx_bufs(np);
-       netif_release_rx_bufs(np);
+       if (np->copying_receiver)
+               netif_release_rx_bufs_copy(np);
+       else
+               netif_release_rx_bufs_flip(np);
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
 }
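
One detail worth noting in netif_release_rx_bufs_copy() is the
add_id_to_freelist(np->rx_skbs, i) call: netfront recycles ring slots by
threading a freelist through the skb pointer array itself, storing the
next free id in place of a pointer. A minimal userspace sketch of that
pattern (the slot-0-as-head convention is assumed for illustration and
may differ from the driver) might be:

    #include <stdio.h>

    #define RING_SIZE 8

    /* Free slots hold the next free id, cast into the pointer slot;
     * slot 0 serves as the freelist head in this sketch. */
    static void *slots[RING_SIZE];

    static void add_id_to_freelist(void **list, unsigned long id)
    {
            list[id] = list[0];
            list[0] = (void *)id;
    }

    static unsigned long get_id_from_freelist(void **list)
    {
            unsigned long id = (unsigned long)list[0];
            list[0] = list[id];
            return id;
    }

    int main(void)
    {
            unsigned long i, id;

            for (i = 1; i < RING_SIZE; i++) /* chain usable slots */
                    add_id_to_freelist(slots, i);

            id = get_id_from_freelist(slots);
            printf("allocated slot %lu\n", id);
            add_id_to_freelist(slots, id);  /* release it again */
            return 0;
    }

This is why the copy path can simply end foreign access and push the
index back on the freelist, whereas the flip path above still has to
queue mmu_update/multicall work to reclaim transferred pages.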

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
