
[Xen-devel] [patch 6/6] netif_release_rx_bufs


  • To: Xen devel list <xen-devel@xxxxxxxxxxxxxxxxxxx>
  • From: Gerd Hoffmann <kraxel@xxxxxxx>
  • Date: Thu, 17 Aug 2006 16:13:32 +0200
  • Delivery-date: Thu, 17 Aug 2006 07:14:01 -0700
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

  Hi,

This patch adds a netif_release_rx_bufs() function to the netfront
driver.  It is intended to fix the rx buffer page leak.  Unfortunately
it doesn't work perfectly: reclaiming the pages granted to the backend
driver only works if the backend has already handed them back, filled
with network data.  Reclaiming unfilled rx buffers does NOT work.
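
The underlying reason is that gnttab_end_foreign_transfer_ref() only
hands back a usable mfn once the backend has completed a transfer into
that slot; if it returns 0, the page we originally gave away is simply
gone from our side.  In essence the per-slot decision boils down to
this (sketch only, assuming the usual netfront context -- the real
loop is in the patch below):

/* Sketch of the per-slot reclaim decision (full version in the patch below).
 * Assumes netfront context: np, NET_RX_RING_SIZE, rx bookkeeping as in
 * netfront.c. */
int id;
grant_ref_t ref;
unsigned long mfn;

for (id = 0; id < NET_RX_RING_SIZE; id++) {
	ref = np->grant_rx_ref[id];
	if (ref == GRANT_INVALID_REF)
		continue;		/* slot was never handed to the backend */

	mfn = gnttab_end_foreign_transfer_ref(ref);
	if (mfn == 0) {
		/* Backend never completed a transfer into this slot; the
		 * page we gave away cannot be pulled back from here. */
		continue;
	}

	/* Transfer completed: we own a page again, reclaim the slot. */
	gnttab_release_grant_reference(&np->gref_rx_head, ref);
	np->grant_rx_ref[id] = GRANT_INVALID_REF;
}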

I think we need either a way to get pages with transfer grants back
without cooperation from the backend driver, or some way to say
"pretty pretty please, give me back my rx buffers" to the netback driver.

comments?

  Gerd

-- 
Gerd Hoffmann <kraxel@xxxxxxx>
http://www.suse.de/~kraxel/julika-dora.jpeg
Signed-off-by: Gerd Hoffmann <kraxel@xxxxxxx>
Index: source-lnx-stable-22813/drivers/xen/netfront/netfront.c
===================================================================
--- source-lnx-stable-22813.orig/drivers/xen/netfront/netfront.c	2006-08-17 15:20:17.000000000 +0200
+++ source-lnx-stable-22813/drivers/xen/netfront/netfront.c	2006-08-17 15:20:17.000000000 +0200
@@ -494,6 +494,7 @@ static int network_open(struct net_devic
 {
        struct netfront_info *np = netdev_priv(dev);
 
+       DPRINTK("%s\n", np->xbdev->nodename);
        memset(&np->stats, 0, sizeof(np->stats));
 
        network_alloc_rx_buffers(dev);
@@ -1285,10 +1286,80 @@ err:
        return more_to_do;
 }
 
+static void netif_release_rx_bufs(struct netfront_info *np)
+{
+       struct mmu_update      *mmu = np->rx_mmu;
+       struct multicall_entry *mcl = np->rx_mcl;
+       struct sk_buff *skb;
+       unsigned long mfn;
+       int bufs = 0, gnttab = 0, unused = 0;
+       int id, ref;
+
+       spin_lock(&np->rx_lock);
+
+       for (id = 0; id < NET_RX_RING_SIZE; id++) {
+               if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
+                       unused++;
+                       continue;
+               }
+               if ((mfn = gnttab_end_foreign_transfer_ref(ref)) == 0) {
+                       gnttab++;
+                       continue;
+               }
+
+               gnttab_release_grant_reference(&np->gref_rx_head, ref);
+               np->grant_rx_ref[id] = GRANT_INVALID_REF;
+
+               skb = np->rx_skbs[id];
+               add_id_to_freelist(np->rx_skbs, id);
+
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       /* Remap the page. */
+                       MULTI_update_va_mapping(mcl, (unsigned long)skb->head,
+                                               pfn_pte_ma(mfn, PAGE_KERNEL),
+                                               0);
+                       mcl++;
+                       mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
+                               | MMU_MACHPHYS_UPDATE;
+                       mmu->val = __pa(skb->head) >> PAGE_SHIFT;
+                       mmu++;
+
+                       set_phys_to_machine(__pa(skb->head) >> PAGE_SHIFT,
+                                           mfn);
+               }
+               bufs++;
+
+#if 0 /* FIXME */
+               dev_kfree_skb(skb);
+#endif
+       }
+
+       printk("%s: %d released ok, %d gnttab errs, %d unused slots\n",
+              __FUNCTION__, bufs, gnttab, unused);
+       if (bufs > 0) {
+               /* Some pages are no longer absent... */
+               balloon_update_driver_allowance(-bufs);
+
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       /* Do all the remapping work, and M2P updates,
+                        * in one big hypercall. */
+                       mcl->op = __HYPERVISOR_mmu_update;
+                       mcl->args[0] = (unsigned long)np->rx_mmu;
+                       mcl->args[1] = mmu - np->rx_mmu;
+                       mcl->args[2] = 0;
+                       mcl->args[3] = DOMID_SELF;
+                       mcl++;
+                       HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
+               }
+       }
+
+       spin_unlock(&np->rx_lock);
+}
 
 static int network_close(struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
+       DPRINTK("%s\n", np->xbdev->nodename);
        netif_stop_queue(np->netdev);
        return 0;
 }
@@ -1427,6 +1498,8 @@ static void network_connect(struct net_d
 static void netif_uninit(struct net_device *dev)
 {
        struct netfront_info *np = netdev_priv(dev);
+       DPRINTK("%s\n", np->xbdev->nodename);
+       netif_release_rx_bufs(np);
        gnttab_free_grant_references(np->gref_tx_head);
        gnttab_free_grant_references(np->gref_rx_head);
 }
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel

 

