
[Xen-changelog] [xen-unstable] [NET] front: Fix rx buffer leak when tearing down an interface.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxxx
# Node ID b85da7cd9ea5a74d07da5098c37ee10d2ca68c8f
# Parent  7258181ab445dcdcdb5da7a5c0db05906ae3923a
[NET] front: Fix rx buffer leak when tearing down an interface.
Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxx>
---
 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c   |   13 +++
 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c |   80 +++++++++++++++++++
 linux-2.6-xen-sparse/include/xen/balloon.h           |    2 
 3 files changed, 95 insertions(+)

diff -r 7258181ab445 -r b85da7cd9ea5 linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c
--- a/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Sat Aug 19 10:21:02 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/balloon/balloon.c        Sat Aug 19 10:58:07 2006 +0100
@@ -606,8 +606,21 @@ void balloon_dealloc_empty_page_range(
        schedule_work(&balloon_worker);
 }
 
+void balloon_release_driver_page(struct page *page)
+{
+       unsigned long flags;
+
+       balloon_lock(flags);
+       balloon_append(page);
+       driver_pages--;
+       balloon_unlock(flags);
+
+       schedule_work(&balloon_worker);
+}
+
 EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
 EXPORT_SYMBOL_GPL(balloon_alloc_empty_page_range);
 EXPORT_SYMBOL_GPL(balloon_dealloc_empty_page_range);
+EXPORT_SYMBOL_GPL(balloon_release_driver_page);
 
 MODULE_LICENSE("Dual BSD/GPL");
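
For reference, a minimal caller-side sketch of the new helper (illustrative
only, not part of the changeset; example_release_frameless_page() is a made-up
name, while the body mirrors the netfront hunk below). When
gnttab_end_foreign_transfer_ref() returns an mfn of zero, the backend never
handed back a machine frame, so the struct page is left without backing
memory. That is exactly the state of a ballooned-out page, so it must go back
through the balloon driver rather than the normal page allocator:

        /* Illustrative sketch, not part of the changeset: release one
         * rx buffer whose frame never came back from the backend. */
        static void example_release_frameless_page(struct sk_buff *skb)
        {
                struct page *page = skb_shinfo(skb)->frags[0].page;

                /* Append the frameless page to the balloon ... */
                balloon_release_driver_page(page);
                /* ... then free the skb without touching the frag page. */
                skb_shinfo(skb)->nr_frags = 0;
                dev_kfree_skb(skb);
        }
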
diff -r 7258181ab445 -r b85da7cd9ea5 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Sat Aug 19 10:21:02 2006 +0100
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Sat Aug 19 10:58:07 2006 +0100
@@ -1397,6 +1397,85 @@ err:
        return more_to_do;
 }
 
+static void netif_release_rx_bufs(struct netfront_info *np)
+{
+       struct mmu_update      *mmu = np->rx_mmu;
+       struct multicall_entry *mcl = np->rx_mcl;
+       struct sk_buff *skb;
+       unsigned long mfn;
+       int xfer = 0, noxfer = 0, unused = 0;
+       int id, ref;
+
+       if (np->copying_receiver) {
+               printk("%s: fix me for copying receiver.\n", __FUNCTION__);
+               return;
+       }
+
+       spin_lock(&np->rx_lock);
+
+       for (id = 0; id < NET_RX_RING_SIZE; id++) {
+               if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
+                       unused++;
+                       continue;
+               }
+
+               skb = np->rx_skbs[id];
+               mfn = gnttab_end_foreign_transfer_ref(ref);
+               gnttab_release_grant_reference(&np->gref_rx_head, ref);
+               np->grant_rx_ref[id] = GRANT_INVALID_REF;
+               add_id_to_freelist(np->rx_skbs, id);
+
+               if (0 == mfn) {
+                       struct page *page = skb_shinfo(skb)->frags[0].page;
+                       balloon_release_driver_page(page);
+                       skb_shinfo(skb)->nr_frags = 0;
+                       dev_kfree_skb(skb);
+                       noxfer++;
+                       continue;
+               }
+
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       /* Remap the page. */
+                       struct page *page = skb_shinfo(skb)->frags[0].page;
+                       unsigned long pfn = page_to_pfn(page);
+                       void *vaddr = page_address(page);
+
+                       MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
+                                               pfn_pte_ma(mfn, PAGE_KERNEL),
+                                               0);
+                       mcl++;
+                       mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
+                               | MMU_MACHPHYS_UPDATE;
+                       mmu->val = pfn;
+                       mmu++;
+
+                       set_phys_to_machine(pfn, mfn);
+               }
+               dev_kfree_skb(skb);
+               xfer++;
+       }
+
+       printk("%s: %d xfer, %d noxfer, %d unused\n",
+              __FUNCTION__, xfer, noxfer, unused);
+
+       if (xfer) {
+               /* Some pages are no longer absent... */
+               balloon_update_driver_allowance(-xfer);
+
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       /* Do all the remapping work and M2P updates. */
+                       mcl->op = __HYPERVISOR_mmu_update;
+                       mcl->args[0] = (unsigned long)np->rx_mmu;
+                       mcl->args[1] = mmu - np->rx_mmu;
+                       mcl->args[2] = 0;
+                       mcl->args[3] = DOMID_SELF;
+                       mcl++;
+                       HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+               }
+       }
+
+       spin_unlock(&np->rx_lock);
+}
 
 static int network_close(struct net_device *dev)
 {
@@ -1553,6 +1632,7 @@ static void netif_uninit(struct net_devi
 static void netif_uninit(struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
+       netif_release_rx_bufs(np);
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
 }
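
The batched remap in netif_release_rx_bufs() is easier to follow as a
single-page version. Below is a minimal sketch of the same pattern for one
transferred frame, assuming a non-auto-translated guest;
example_reclaim_one_frame() is a made-up name, and the patch proper batches
these operations through np->rx_mcl/np->rx_mmu so the whole ring is fixed up
with a single multicall rather than one hypercall per page:

        /* Illustrative sketch, not part of the changeset: reattach one
         * machine frame that the backend transferred to this domain. */
        static void example_reclaim_one_frame(struct page *page,
                                              unsigned long mfn)
        {
                unsigned long pfn = page_to_pfn(page);
                struct mmu_update u;

                /* Point the kernel mapping of the page at the new frame. */
                HYPERVISOR_update_va_mapping((unsigned long)page_address(page),
                                             pfn_pte_ma(mfn, PAGE_KERNEL), 0);

                /* Record the guest-side phys-to-machine entry ... */
                set_phys_to_machine(pfn, mfn);

                /* ... and the hypervisor's machine-to-phys entry. */
                u.ptr = ((maddr_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
                u.val = pfn;
                HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
        }

Each successfully reclaimed frame also means the domain now holds one more
page than the balloon driver expects, which is why the hunk finishes with
balloon_update_driver_allowance(-xfer).
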
diff -r 7258181ab445 -r b85da7cd9ea5 linux-2.6-xen-sparse/include/xen/balloon.h
--- a/linux-2.6-xen-sparse/include/xen/balloon.h        Sat Aug 19 10:21:02 2006 +0100
+++ b/linux-2.6-xen-sparse/include/xen/balloon.h        Sat Aug 19 10:58:07 2006 +0100
@@ -52,6 +52,8 @@ balloon_dealloc_empty_page_range(
 balloon_dealloc_empty_page_range(
        struct page *page, unsigned long nr_pages);
 
+void balloon_release_driver_page(struct page *page);
+
 /*
  * Prevent the balloon driver from changing the memory reservation during
  * a driver critical region.
