[Xen-changelog] [xen-unstable] amd iommu: enable ats devices



# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1320748013 -3600
# Node ID 452d9143687f826a30d0e56b7ba3b9783fb6bf24
# Parent  00ab8c0299e2a02618f8e83225db7d61c165f039
amd iommu: enable ats devices

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
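
In brief, this patch wires ATS (Address Translation Services) into the AMD IOMMU
device assignment path: when a device advertises ATS and the IOMMU reports IOTLB
support, the new helper iommu_dte_set_iotlb() sets the IOTLB-enable field in word 3
of the device table entry (subject to the ats_enabled option), the DTE is invalidated
and the command buffer flushed, ATS is then enabled in the device's PCI configuration
space via enable_ats_device(), and the device IOTLB is flushed with
amd_iommu_flush_iotlb(); the disable path clears the bit and calls disable_ats_device().
The sketch below only illustrates the DTE bit manipulation in a self-contained form.
The mask/shift constants and the simplified set_field_in_reg_u32() are placeholders
standing in for the definitions in Xen's AMD IOMMU headers, not the in-tree values.

/*
 * Minimal, self-contained sketch of the DTE update performed by the new
 * iommu_dte_set_iotlb() helper.  The mask/shift values are placeholders;
 * the real IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK/_SHIFT constants live in
 * Xen's AMD IOMMU headers and are not reproduced here.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_IOTLB_SUPPORT_MASK   0x00000001U  /* placeholder value */
#define EXAMPLE_IOTLB_SUPPORT_SHIFT  0            /* placeholder value */

/* Simplified stand-in for Xen's set_field_in_reg_u32(). */
static void set_field_in_reg_u32(uint32_t field, uint32_t reg,
                                 uint32_t mask, int shift, uint32_t *out)
{
    reg &= ~mask;
    reg |= (field << shift) & mask;
    *out = reg;
}

/* Mirrors the new helper: update the IOTLB-enable field in DTE word 3. */
static void dte_set_iotlb(uint32_t *dte, uint8_t enable)
{
    uint32_t entry = dte[3];

    set_field_in_reg_u32(!!enable, entry,
                         EXAMPLE_IOTLB_SUPPORT_MASK,
                         EXAMPLE_IOTLB_SUPPORT_SHIFT, &entry);
    dte[3] = entry;
}

int main(void)
{
    uint32_t dte[8] = { 0 };  /* a device table entry is 256 bits wide */

    /*
     * Assignment path: the patch sets the bit only when the device is
     * ATS-capable and the IOMMU supports IOTLB invalidation, then enables
     * ATS on the device and flushes its IOTLB.  Teardown clears the bit
     * before disabling ATS on the device.
     */
    dte_set_iotlb(dte, 1);
    printf("dte[3] after enable:  %#x\n", dte[3]);

    dte_set_iotlb(dte, 0);
    printf("dte[3] after disable: %#x\n", dte[3]);
    return 0;
}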


diff -r 00ab8c0299e2 -r 452d9143687f xen/drivers/passthrough/amd/iommu_map.c
--- a/xen/drivers/passthrough/amd/iommu_map.c   Tue Nov 08 11:26:13 2011 +0100
+++ b/xen/drivers/passthrough/amd/iommu_map.c   Tue Nov 08 11:26:53 2011 +0100
@@ -370,6 +370,17 @@
     dte[0] = entry;
 }
 
+void iommu_dte_set_iotlb(u32 *dte, u8 i)
+{
+    u32 entry;
+
+    entry = dte[3];
+    set_field_in_reg_u32(!!i, entry,
+                         IOMMU_DEV_TABLE_IOTLB_SUPPORT_MASK,
+                         IOMMU_DEV_TABLE_IOTLB_SUPPORT_SHIFT, &entry);
+    dte[3] = entry;
+}
+
 void __init amd_iommu_set_intremap_table(
     u32 *dte, u64 intremap_ptr, u8 int_valid)
 {
diff -r 00ab8c0299e2 -r 452d9143687f xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Nov 08 11:26:13 2011 +0100
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c       Tue Nov 08 11:26:53 2011 +0100
@@ -25,6 +25,7 @@
 #include <asm/hvm/iommu.h>
 #include <asm/amd-iommu.h>
 #include <asm/hvm/svm/amd-iommu-proto.h>
+#include "../ats.h"
 
 struct amd_iommu *find_iommu_for_device(int seg, int bdf)
 {
@@ -86,6 +87,9 @@
     void *dte;
     unsigned long flags;
     int req_id, valid = 1;
+    int dte_i = 0;
+    u8 bus = PCI_BUS(bdf);
+    u8 devfn = PCI_DEVFN2(bdf);
 
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
 
@@ -94,6 +98,9 @@
     if ( iommu_passthrough && (domain->domain_id == 0) )
         valid = 0;
 
+    if ( ats_enabled )
+        dte_i = 1;
+
     /* get device-table entry */
     req_id = get_dma_requestor_id(iommu->seg, bdf);
     dte = iommu->dev_table.buffer + (req_id * IOMMU_DEV_TABLE_ENTRY_SIZE);
@@ -107,6 +114,10 @@
             (u32 *)dte, page_to_maddr(hd->root_table), hd->domain_id,
             hd->paging_mode, valid);
 
+        if ( pci_ats_device(iommu->seg, bus, devfn) &&
+             iommu->iotlb_support )
+            iommu_dte_set_iotlb((u32 *)dte, dte_i);
+
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
 
@@ -118,11 +129,27 @@
     }
 
     spin_unlock_irqrestore(&iommu->lock, flags);
+
+    ASSERT(spin_is_locked(&pcidevs_lock));
+
+    if ( pci_ats_device(iommu->seg, bus, devfn) &&
+         !pci_ats_enabled(iommu->seg, bus, devfn) )
+    {
+        struct pci_dev *pdev;
+
+        enable_ats_device(iommu->seg, bus, devfn);
+
+        ASSERT(spin_is_locked(&pcidevs_lock));
+        pdev = pci_get_pdev(iommu->seg, bus, devfn);
+
+        ASSERT( pdev != NULL );
+        amd_iommu_flush_iotlb(pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
+    }
 }
 
 static void __init amd_iommu_setup_dom0_device(struct pci_dev *pdev)
 {
-    int bdf = (pdev->bus << 8) | pdev->devfn;
+    int bdf = PCI_BDF2(pdev->bus, pdev->devfn);
     struct amd_iommu *iommu = find_iommu_for_device(pdev->seg, bdf);
 
     if ( likely(iommu != NULL) )
@@ -261,12 +288,14 @@
     setup_dom0_pci_devices(d, amd_iommu_setup_dom0_device);
 }
 
-static void amd_iommu_disable_domain_device(
-    struct domain *domain, struct amd_iommu *iommu, int bdf)
+void amd_iommu_disable_domain_device(struct domain *domain,
+                                     struct amd_iommu *iommu, int bdf)
 {
     void *dte;
     unsigned long flags;
     int req_id;
+    u8 bus = PCI_BUS(bdf);
+    u8 devfn = PCI_DEVFN2(bdf);
 
     BUG_ON ( iommu->dev_table.buffer == NULL );
     req_id = get_dma_requestor_id(iommu->seg, bdf);
@@ -276,6 +305,11 @@
     if ( is_translation_valid((u32 *)dte) )
     {
         disable_translation((u32 *)dte);
+
+        if ( pci_ats_device(iommu->seg, bus, devfn) &&
+             iommu->iotlb_support )
+            iommu_dte_set_iotlb((u32 *)dte, 0);
+
         invalidate_dev_table_entry(iommu, req_id);
         flush_command_buffer(iommu);
         AMD_IOMMU_DEBUG("Disable: device id = 0x%04x, "
@@ -284,6 +318,12 @@
                         domain_hvm_iommu(domain)->paging_mode);
     }
     spin_unlock_irqrestore(&iommu->lock, flags);
+
+    ASSERT(spin_is_locked(&pcidevs_lock));
+
+    if ( pci_ats_device(iommu->seg, bus, devfn) &&
+         pci_ats_enabled(iommu->seg, bus, devfn) )
+        disable_ats_device(iommu->seg, bus, devfn);
 }
 
 static int reassign_device( struct domain *source, struct domain *target,
@@ -299,7 +339,7 @@
     if ( !pdev )
         return -ENODEV;
 
-    bdf = (bus << 8) | devfn;
+    bdf = PCI_BDF2(bus, devfn);
     iommu = find_iommu_for_device(seg, bdf);
     if ( !iommu )
     {
@@ -421,7 +461,7 @@
     if ( !pdev->domain )
         return -EINVAL;
 
-    bdf = (pdev->bus << 8) | pdev->devfn;
+    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
     iommu = find_iommu_for_device(pdev->seg, bdf);
     if ( !iommu )
     {
@@ -443,7 +483,7 @@
     if ( !pdev->domain )
         return -EINVAL;
 
-    bdf = (pdev->bus << 8) | pdev->devfn;
+    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
     iommu = find_iommu_for_device(pdev->seg, bdf);
     if ( !iommu )
     {
diff -r 00ab8c0299e2 -r 452d9143687f xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Nov 08 11:26:13 2011 +0100
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h     Tue Nov 08 11:26:53 2011 +0100
@@ -75,6 +75,7 @@
     u32 *dte, u64 intremap_ptr, u8 int_valid);
 void amd_iommu_set_root_page_table(
     u32 *dte, u64 root_ptr, u16 domain_id, u8 paging_mode, u8 valid);
+void iommu_dte_set_iotlb(u32 *dte, u8 i);
 void invalidate_dev_table_entry(struct amd_iommu *iommu, u16 devic_id);
 
 /* send cmd to iommu */
