[Xen-changelog] [xen-unstable] vt-d: Free memory of g2m_ioport_list.

# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1194342202 0
# Node ID 55b24410ebfc3984a4d17721d9ce20713b67ceaf
# Parent  e2445c775efcc6804742be3a2f9d976712c6295b
vt-d: Free memory of g2m_ioport_list.

This patch frees the memory of g2m_ioport_list entries when a g2m_ioport
is removed or the iommu domain is destroyed, avoiding a memory leak. In
addition, it does some cleanup in domctl.c.
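
For reference, below is a minimal, self-contained userspace sketch of the
cleanup pattern the patch applies. It is an illustration only: it substitutes
a plain singly-linked list for Xen's <xen/list.h> primitives and
malloc()/free() for xmalloc()/xfree(); apart from the g2m_ioport field
names, the identifiers are hypothetical.

    #include <stdio.h>
    #include <stdlib.h>

    struct g2m_ioport {
        struct g2m_ioport *next;
        unsigned int gport, mport, np;
    };

    /* Drain the whole list, as iommu_domain_destroy() now does: cache
     * the successor before freeing the current node so the walk stays
     * valid -- the moral equivalent of list_for_each_safe(). */
    static void free_ioport_list(struct g2m_ioport **head)
    {
        struct g2m_ioport *cur = *head, *next;

        while ( cur != NULL )
        {
            next = cur->next;   /* grab the successor first */
            free(cur);          /* xfree() in the real code */
            cur = next;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct g2m_ioport *head = NULL;
        unsigned int i;

        /* Build a few entries, as an ioport_map:add request would. */
        for ( i = 0; i < 3; i++ )
        {
            struct g2m_ioport *p = malloc(sizeof(*p));
            if ( p == NULL )
                return 1;
            p->gport = 0x100 + i;
            p->mport = 0x200 + i;
            p->np    = 8;
            p->next  = head;
            head     = p;
        }

        free_ioport_list(&head);   /* without this, the entries leak */
        printf("list drained, head=%p\n", (void *)head);
        return 0;
    }

In the patch itself the same idea appears twice: the ioport_map remove path
xfree()s the matching entry immediately after list_del(), and
iommu_domain_destroy() drains the whole g2m_ioport_list with
list_for_each_safe() before calling iommu_domain_teardown().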

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/arch/x86/domctl.c         |   50 ++++++++++++++++++++++--------------------
 xen/arch/x86/hvm/vmx/vtd/io.c |   13 ++++++++++
 2 files changed, 40 insertions(+), 23 deletions(-)

diff -r e2445c775efc -r 55b24410ebfc xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Tue Nov 06 09:41:57 2007 +0000
+++ b/xen/arch/x86/domctl.c     Tue Nov 06 09:43:22 2007 +0000
@@ -535,9 +535,10 @@ long arch_do_domctl(
         if ( !vtd_enabled )
             break;
 
-        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) ) {
+        if ( unlikely((d = get_domain_by_id(domctl->domain)) == NULL) )
+        {
             gdprintk(XENLOG_ERR,
-                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n"); 
+                "XEN_DOMCTL_assign_device: get_domain_by_id() failed\n");
             break;
         }
         hd = domain_hvm_iommu(d);
@@ -548,7 +549,7 @@ long arch_do_domctl(
             break;
 
         ret = assign_device(d, bus, devfn);
-        gdprintk(XENLOG_ERR, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
+        gdprintk(XENLOG_INFO, "XEN_DOMCTL_assign_device: bdf = %x:%x:%x\n",
             bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
         put_domain(d);
     }
@@ -569,7 +570,7 @@ long arch_do_domctl(
             gdprintk(XENLOG_ERR, "pt_irq_create_bind failed!\n");
         rcu_unlock_domain(d);
     }
-    break;    
+    break;
 
     case XEN_DOMCTL_memory_mapping:
     {
@@ -587,25 +588,25 @@ long arch_do_domctl(
         if ( unlikely((d = rcu_lock_domain_by_id(domctl->domain)) == NULL) )
             break;
 
-        ret=0;        
-        if ( domctl->u.memory_mapping.add_mapping ) 
+        ret=0;
+        if ( domctl->u.memory_mapping.add_mapping )
         {
             gdprintk(XENLOG_INFO,
                 "memory_map:add: gfn=%lx mfn=%lx nr_mfns=%lx\n",
-                gfn, mfn, nr_mfns);   
-            
+                gfn, mfn, nr_mfns);
+
             ret = iomem_permit_access(d, mfn, mfn + nr_mfns - 1);
             for ( i = 0; i < nr_mfns; i++ )
-                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i)); 
-        }
-        else 
+                set_mmio_p2m_entry(d, gfn+i, _mfn(mfn+i));
+        }
+        else
         {
             gdprintk(XENLOG_INFO,
                 "memory_map:remove: gfn=%lx mfn=%lx nr_mfns=%lx\n",
                  gfn, mfn, nr_mfns);
 
             for ( i = 0; i < nr_mfns; i++ )
-                clear_mmio_p2m_entry(d, gfn+i); 
+                clear_mmio_p2m_entry(d, gfn+i);
             ret = iomem_deny_access(d, mfn, mfn + nr_mfns - 1);
         }
 
@@ -644,39 +645,42 @@ long arch_do_domctl(
             gdprintk(XENLOG_INFO,
                 "ioport_map:add f_gport=%x f_mport=%x np=%x\n",
                 fgp, fmp, np);
-                
+
             list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
-                if (g2m_ioport->mport == fmp ) {
+                if (g2m_ioport->mport == fmp )
+                {
                     g2m_ioport->gport = fgp;
-                    g2m_ioport->np = np;                    
+                    g2m_ioport->np = np;
                     found = 1;
                     break;
                 }
-            if ( !found ) 
-            {                 
+            if ( !found )
+            {
                 g2m_ioport = xmalloc(struct g2m_ioport);
                 g2m_ioport->gport = fgp;
                 g2m_ioport->mport = fmp;
                 g2m_ioport->np = np;
                 list_add_tail(&g2m_ioport->list, &hd->g2m_ioport_list);
-            } 
+            }
             ret = ioports_permit_access(d, fmp, fmp + np - 1);
-            
-        }
-        else {
+        }
+        else
+        {
             gdprintk(XENLOG_INFO,
                 "ioport_map:remove f_gport=%x f_mport=%x np=%x\n",
                 fgp, fmp, np);
             list_for_each_entry(g2m_ioport, &hd->g2m_ioport_list, list)
-                if ( g2m_ioport->mport == fmp ) {
+                if ( g2m_ioport->mport == fmp )
+                {
                     list_del(&g2m_ioport->list);
+                    xfree(g2m_ioport);
                     break;
                 }
             ret = ioports_deny_access(d, fmp, fmp + np - 1);
         }
         rcu_unlock_domain(d);
     }
-    break;    
+    break;
 
     case XEN_DOMCTL_pin_mem_cacheattr:
     {
diff -r e2445c775efc -r 55b24410ebfc xen/arch/x86/hvm/vmx/vtd/io.c
--- a/xen/arch/x86/hvm/vmx/vtd/io.c     Tue Nov 06 09:41:57 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vtd/io.c     Tue Nov 06 09:43:22 2007 +0000
@@ -164,6 +164,9 @@ void iommu_domain_destroy(struct domain 
 {
     struct hvm_irq_dpci *hvm_irq_dpci = d->arch.hvm_domain.irq.dpci;
     uint32_t i;
+    struct hvm_iommu *hd  = domain_hvm_iommu(d);
+    struct list_head *ioport_list, *tmp;
+    struct g2m_ioport *ioport;
 
     if ( !vtd_enabled )
         return;
@@ -180,5 +183,15 @@ void iommu_domain_destroy(struct domain 
         xfree(hvm_irq_dpci);
     }
 
+    if ( hd )
+    {
+        list_for_each_safe ( ioport_list, tmp, &hd->g2m_ioport_list )
+        {
+            ioport = list_entry(ioport_list, struct g2m_ioport, list);
+            list_del(&ioport->list);
+            xfree(ioport);
+        }
+    }
+
     iommu_domain_teardown(d);
 }
