
[Xen-changelog] [xen-unstable] x86: Fix debug build.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxxxxxxxx>
# Date 1191227524 -3600
# Node ID 96895c637d27755226d33ddc34fc966bf839bb71
# Parent  5c7afb32df999ceca475f6582e3df3d7ff42d2fe
x86: Fix debug build.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c |   14 +++++++-------
 1 files changed, 7 insertions(+), 7 deletions(-)

diff -r 5c7afb32df99 -r 96895c637d27 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Mon Oct 01 06:39:00 2007 +0100
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Mon Oct 01 09:32:04 2007 +0100
@@ -173,7 +173,7 @@ static struct page_info *addr_to_dma_pag
         if ( dma_pte_addr(*pte) == 0 )
         {
             pg = alloc_domheap_page(NULL);
-            vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
+            vaddr = map_domain_page(page_to_mfn(pg));
             if ( !vaddr )
             {
                 spin_unlock_irqrestore(&hd->mapping_lock, flags);
@@ -195,7 +195,7 @@ static struct page_info *addr_to_dma_pag
         else
         {
             pg = maddr_to_page(pte->val);
-            vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
+            vaddr = map_domain_page(page_to_mfn(pg));
             if ( !vaddr )
             {
                 spin_unlock_irqrestore(&hd->mapping_lock, flags);
@@ -250,7 +250,7 @@ static struct page_info *dma_addr_level_
         if ( level == total )
             return pg;
 
-        parent = map_domain_page(mfn_x(page_to_mfn(pg)));
+        parent = map_domain_page(page_to_mfn(pg));
         total--;
     }
 
@@ -542,7 +542,7 @@ static void dma_pte_clear_one(struct dom
     pg = dma_addr_level_page(domain, addr, 1);
     if ( !pg )
         return;
-    pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+    pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
     pte += address_level_offset(addr, 1);
     if ( pte )
     {
@@ -612,7 +612,7 @@ void dma_pte_free_pagetable(struct domai
             pg = dma_addr_level_page(domain, tmp, level);
             if ( !pg )
                 return;
-            pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+            pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
             pte += address_level_offset(tmp, level);
             dma_clear_pte(*pte);
             iommu_flush_cache_entry(iommu, pte);
@@ -1493,7 +1493,7 @@ int iommu_map_page(struct domain *d, pad
     pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
     if ( !pg )
         return -ENOMEM;
-    pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+    pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
     pte += mfn & LEVEL_MASK;
     dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
     dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
@@ -1554,7 +1554,7 @@ int iommu_page_mapping(struct domain *do
         pg = addr_to_dma_page(domain, iova + PAGE_SIZE_4K * index);
         if ( !pg )
             return -ENOMEM;
-        pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+        pte = (struct dma_pte *)map_domain_page(page_to_mfn(pg));
         pte += start_pfn & LEVEL_MASK;
         dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
         dma_set_pte_prot(*pte, prot);
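
For readers wondering why dropping mfn_x() matters only for debug builds: the change is consistent with Xen's typesafe-MFN idiom, in which mfn_t is a single-member struct in debug builds (so mfn_x() accepts only a genuine mfn_t) and collapses to a plain unsigned long in release builds. If page_to_mfn() as used in this VT-d file yields a raw unsigned long, then the old mfn_x(page_to_mfn(pg)) spelling happens to compile in release builds but is a type error in a debug build. The following is a minimal, self-contained sketch of that pattern, not the real Xen headers; page_to_mfn_raw() and the map_domain_page() stub below are hypothetical stand-ins for illustration only.

/*
 * Minimal sketch of Xen's typesafe-MFN idiom (NOT the actual Xen
 * headers).  In a debug build mfn_t is a distinct struct type, so
 * mfn_x() accepts only a real mfn_t; in a release build (-DNDEBUG)
 * the wrapper compiles away to a bare unsigned long.
 */
#include <stdio.h>

#ifndef NDEBUG
/* Debug build: mfn_t is a distinct type, catching integer/MFN mix-ups. */
typedef struct { unsigned long mfn; } mfn_t;
static inline mfn_t _mfn(unsigned long m)  { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }
#else
/* Release build: the wrapper costs nothing and mfn_x() is a no-op. */
typedef unsigned long mfn_t;
#define _mfn(m)  (m)
#define mfn_x(m) (m)
#endif

/*
 * Illustrative stand-ins (hypothetical names): assume page_to_mfn()
 * in the VT-d code hands back a raw unsigned long frame number, and
 * map_domain_page() consumes that raw value.
 */
static unsigned long page_to_mfn_raw(const void *pg)
{
    return (unsigned long)pg >> 12;        /* fake page -> frame number */
}

static void *map_domain_page(unsigned long mfn)
{
    return (void *)(mfn << 12);            /* fake frame number -> vaddr */
}

int main(void)
{
    const void *pg = (const void *)0x123000UL;

    /* The patched form: pass the raw MFN straight through.  This
     * compiles identically in debug and release builds. */
    void *vaddr = map_domain_page(page_to_mfn_raw(pg));

    /*
     * The pre-patch form,
     *     map_domain_page(mfn_x(page_to_mfn_raw(pg)));
     * would fail to compile here in a debug build, because mfn_x()
     * wants an mfn_t struct rather than an unsigned long.  In a
     * release build both spellings happen to compile, which is how
     * the breakage could slip in unnoticed.
     */
    printf("mapped at %p\n", vaddr);
    return 0;
}

Building the sketch with and without -DNDEBUG shows the same effect the patch addresses: the mfn_x() form in the comment only compiles once the typesafe wrapper has been compiled away.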

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog