[Xen-changelog] [xen-unstable] vtd: code cleanup



# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1224241451 -3600
# Node ID 609d0d34450f373b930188477e9087a62481f9a4
# Parent  50aaffd8f87c7dd085cc02bf471f1e4d1155dd11
vtd: code cleanup

Remove iommu_page_mapping/unmapping, which are redundant:
intel_iommu_map_page/intel_iommu_unmap_page already provide the same
functionality.

Correct IRTA_REG_EIMI_SHIFT to IRTA_REG_EIME_SHIFT.

Also remove superfluous extern declarations from iommu.c.

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/drivers/passthrough/iommu.c        |    2 
 xen/drivers/passthrough/vtd/intremap.c |    6 -
 xen/drivers/passthrough/vtd/iommu.c    |  101 +++++----------------------------
 3 files changed, 19 insertions(+), 90 deletions(-)
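
For reference, iommu_page_mapping() built the PTEs for an (iova, hpa,
size) range by hand; the same effect is obtained by calling
intel_iommu_map_page() once per page frame. A minimal sketch of the
replacement pattern (the helper name map_range_sketch is illustrative,
and intel_iommu_map_page() is assumed to take (domain, gfn, mfn) and
return 0 on success):

    /* Illustrative only: identity-map [addr, addr + size) one 4K frame
     * at a time, the pattern this patch switches to. */
    static int map_range_sketch(struct domain *d, paddr_t addr, size_t size)
    {
        unsigned long pfn = addr >> PAGE_SHIFT_4K;
        unsigned long end_pfn = PAGE_ALIGN_4K(addr + size) >> PAGE_SHIFT_4K;

        for ( ; pfn < end_pfn; pfn++ )
        {
            int rc = intel_iommu_map_page(d, pfn, pfn); /* gfn == mfn */
            if ( rc )
                return rc;
        }

        return 0;
    }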

diff -r 50aaffd8f87c -r 609d0d34450f xen/drivers/passthrough/iommu.c
--- a/xen/drivers/passthrough/iommu.c   Fri Oct 17 12:00:25 2008 +0100
+++ b/xen/drivers/passthrough/iommu.c   Fri Oct 17 12:04:11 2008 +0100
@@ -19,8 +19,6 @@
 #include <xen/paging.h>
 #include <xen/guest_access.h>
 
-extern struct iommu_ops intel_iommu_ops;
-extern struct iommu_ops amd_iommu_ops;
 static void parse_iommu_param(char *s);
 static int iommu_populate_page_table(struct domain *d);
 int intel_vtd_setup(void);
diff -r 50aaffd8f87c -r 609d0d34450f xen/drivers/passthrough/vtd/intremap.c
--- a/xen/drivers/passthrough/vtd/intremap.c    Fri Oct 17 12:00:25 2008 +0100
+++ b/xen/drivers/passthrough/vtd/intremap.c    Fri Oct 17 12:04:11 2008 +0100
@@ -479,10 +479,10 @@ int intremap_setup(struct iommu *iommu)
 #if defined(ENABLED_EXTENDED_INTERRUPT_SUPPORT)
     /* set extended interrupt mode bit */
     ir_ctrl->iremap_maddr |=
-            ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIMI_SHIFT) : 0;
+            ecap_ext_intr(iommu->ecap) ? (1 << IRTA_REG_EIME_SHIFT) : 0;
 #endif
-    /* size field = 256 entries per 4K page = 8 - 1 */
-    ir_ctrl->iremap_maddr |= 7;
+    /* set size of the interrupt remapping table */ 
+    ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
     dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
 
     /* set SIRTP */
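
The size field lives in the low four bits of the IRTA register: a value
S yields 2^(S+1) interrupt remapping table entries, and with 16-byte
IRTEs a single 4K page holds 256 of them, hence a size value of 7. A
sketch of the encoding (the EXAMPLE_* names are illustrative; only
IRTA_REG_TABLE_SIZE and IRTA_REG_EIME_SHIFT appear in the patch, and
the EIME bit position is taken from the VT-d specification):

    /* Illustrative IRTA_REG layout, per the VT-d spec:
     *   bits  3:0  size field S, table holds 2^(S+1) IRTEs
     *   bit  11    EIME (extended interrupt mode enable)
     *   bits 63:12 4K-aligned table base address
     */
    #define EXAMPLE_TABLE_SIZE  7   /* 2^(7+1) = 256 entries = one 4K page */
    #define EXAMPLE_EIME_SHIFT  11

    u64 irta = iremap_maddr | (1ULL << EXAMPLE_EIME_SHIFT) | EXAMPLE_TABLE_SIZE;
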
diff -r 50aaffd8f87c -r 609d0d34450f xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Fri Oct 17 12:00:25 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Fri Oct 17 12:04:11 2008 +0100
@@ -567,26 +567,6 @@ static void dma_pte_clear_one(struct dom
     }
 
     unmap_vtd_domain_page(page);
-}
-
-/* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct domain *domain, u64 start, u64 end)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    int addr_width = agaw_to_width(hd->agaw);
-
-    start &= (((u64)1) << addr_width) - 1;
-    end &= (((u64)1) << addr_width) - 1;
-    /* in case it's partial page */
-    start = PAGE_ALIGN_4K(start);
-    end &= PAGE_MASK_4K;
-
-    /* we don't need lock here, nobody else touches the iova range */
-    while ( start < end )
-    {
-        dma_pte_clear_one(domain, start);
-        start += PAGE_SIZE_4K;
-    }
 }
 
 static void iommu_free_pagetable(u64 pt_maddr, int level)
@@ -1511,75 +1491,26 @@ int intel_iommu_unmap_page(struct domain
     return 0;
 }
 
-int iommu_page_mapping(struct domain *domain, paddr_t iova,
-                       paddr_t hpa, size_t size, int prot)
-{
-    struct hvm_iommu *hd = domain_hvm_iommu(domain);
-    struct acpi_drhd_unit *drhd;
-    struct iommu *iommu;
-    u64 start_pfn, end_pfn;
-    struct dma_pte *page = NULL, *pte = NULL;
-    int index;
-    u64 pg_maddr;
-
-    if ( (prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0 )
-        return -EINVAL;
-
-    iova = (iova >> PAGE_SHIFT_4K) << PAGE_SHIFT_4K;
-    start_pfn = hpa >> PAGE_SHIFT_4K;
-    end_pfn = (PAGE_ALIGN_4K(hpa + size)) >> PAGE_SHIFT_4K;
-    index = 0;
-    while ( start_pfn < end_pfn )
-    {
-        pg_maddr = addr_to_dma_page_maddr(domain, iova + PAGE_SIZE_4K*index, 1);
-        if ( pg_maddr == 0 )
-            return -ENOMEM;
-        page = (struct dma_pte *)map_vtd_domain_page(pg_maddr);
-        pte = page + (start_pfn & LEVEL_MASK);
-        dma_set_pte_addr(*pte, (paddr_t)start_pfn << PAGE_SHIFT_4K);
-        dma_set_pte_prot(*pte, prot);
-        iommu_flush_cache_entry(pte);
-        unmap_vtd_domain_page(page);
-        start_pfn++;
-        index++;
-    }
-
-    if ( index > 0 )
-    {
-        for_each_drhd_unit ( drhd )
-        {
-            iommu = drhd->iommu;
-            if ( test_bit(iommu->index, &hd->iommu_bitmap) )
-                if ( iommu_flush_iotlb_psi(iommu, domain_iommu_domid(domain),
-                                           iova, index, 1))
-                    iommu_flush_write_buffer(iommu);
-        }
-    }
-
-    return 0;
-}
-
-int iommu_page_unmapping(struct domain *domain, paddr_t addr, size_t size)
-{
-    dma_pte_clear_range(domain, addr, addr + size);
-
-    return 0;
-}
-
 static int iommu_prepare_rmrr_dev(struct domain *d,
                                   struct acpi_rmrr_unit *rmrr,
                                   u8 bus, u8 devfn)
 {
-    u64 size;
-    int ret;
-
-    /* page table init */
-    size = rmrr->end_address - rmrr->base_address + 1;
-    ret = iommu_page_mapping(d, rmrr->base_address,
-                             rmrr->base_address, size,
-                             DMA_PTE_READ|DMA_PTE_WRITE);
-    if ( ret )
-        return ret;
+    int ret = 0;
+    u64 base, end;
+    unsigned long base_pfn, end_pfn;
+
+    ASSERT(rmrr->base_address < rmrr->end_address);
+    
+    base = rmrr->base_address & PAGE_MASK_4K;
+    base_pfn = base >> PAGE_SHIFT_4K;
+    end = PAGE_ALIGN_4K(rmrr->end_address);
+    end_pfn = end >> PAGE_SHIFT_4K;
+
+    while ( base_pfn < end_pfn )
+    {
+        intel_iommu_map_page(d, base_pfn, base_pfn);
+        base_pfn++;
+    }
 
     if ( domain_context_mapped(bus, devfn) == 0 )
         ret = domain_context_mapping(d, bus, devfn);
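
A worked example of the new pfn arithmetic in iommu_prepare_rmrr_dev(),
with a made-up RMRR range (end_address is inclusive, so PAGE_ALIGN_4K()
rounds it up to the first byte past the region before converting to a
frame number):

    /* Illustrative values only. */
    u64 base = 0xe8000000ULL & PAGE_MASK_4K;          /* 0xe8000000 */
    u64 end  = PAGE_ALIGN_4K(0xe80fffffULL);          /* 0xe8100000 */
    unsigned long base_pfn = base >> PAGE_SHIFT_4K;   /* 0xe8000 */
    unsigned long end_pfn  = end >> PAGE_SHIFT_4K;    /* 0xe8100 */

    /* 0xe8100 - 0xe8000 = 0x100 = 256 identity mappings via
     * intel_iommu_map_page(d, pfn, pfn). */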

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog