[Xen-devel] [PATCH 1/3] amd iommu: use PCI macros



... instead of open coding them.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
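
For reference, the macros this patch switches to are the segment-local BDF helpers
from xen/include/xen/pci.h, plus PCI_MSI_FLAGS_ENABLE from the MSI register
definitions. The sketch below approximates those definitions rather than quoting
them verbatim; note that, with definitions along these lines, the old explicit
"& 0xff" masking in front of PCI_SLOT()/PCI_FUNC() was redundant, since the macros
only look at the low bits of the BDF anyway:

    /* Approximate definitions (cf. xen/include/xen/pci.h); not a verbatim quote. */
    #define PCI_BUS(bdf)     (((bdf) >> 8) & 0xff)   /* bus: bits 15:8 of a BDF */
    #define PCI_SLOT(bdf)    (((bdf) >> 3) & 0x1f)   /* device/slot: bits 7:3   */
    #define PCI_FUNC(bdf)    ((bdf) & 0x07)          /* function: bits 2:0      */
    #define PCI_BDF2(b, df)  ((((b) & 0xff) << 8) | ((df) & 0xff)) /* bus+devfn */

    /* PCI_MSI_FLAGS_ENABLE is bit 0 (0x0001) of the MSI message control word,
     * i.e. the bit the open-coded "control &= ~(1)" below was clearing. */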

--- a/xen/drivers/passthrough/amd/iommu_init.c
+++ b/xen/drivers/passthrough/amd/iommu_init.c
@@ -455,9 +455,9 @@ static void iommu_msi_set_affinity(struc
     unsigned int dest;
     struct amd_iommu *iommu = desc->action->dev_id;
     u16 seg = iommu->seg;
-    u8 bus = (iommu->bdf >> 8) & 0xff;
-    u8 dev = PCI_SLOT(iommu->bdf & 0xff);
-    u8 func = PCI_FUNC(iommu->bdf & 0xff);
+    u8 bus = PCI_BUS(iommu->bdf);
+    u8 dev = PCI_SLOT(iommu->bdf);
+    u8 func = PCI_FUNC(iommu->bdf);
 
     dest = set_desc_affinity(desc, mask);
 
@@ -495,13 +495,13 @@ static void iommu_msi_set_affinity(struc
 static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
 {
     u16 control;
-    int bus = (iommu->bdf >> 8) & 0xff;
-    int dev = PCI_SLOT(iommu->bdf & 0xff);
-    int func = PCI_FUNC(iommu->bdf & 0xff);
+    int bus = PCI_BUS(iommu->bdf);
+    int dev = PCI_SLOT(iommu->bdf);
+    int func = PCI_FUNC(iommu->bdf);
 
     control = pci_conf_read16(iommu->seg, bus, dev, func,
         iommu->msi_cap + PCI_MSI_FLAGS);
-    control &= ~(1);
+    control &= ~PCI_MSI_FLAGS_ENABLE;
     if ( flag )
         control |= flag;
     pci_conf_write16(iommu->seg, bus, dev, func,
--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -272,7 +272,7 @@ static void update_intremap_entry_from_m
     spinlock_t *lock;
     int offset;
 
-    bdf = (pdev->bus << 8) | pdev->devfn;
+    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
     req_id = get_dma_requestor_id(pdev->seg, bdf);
     alias_id = get_intremap_requestor_id(pdev->seg, bdf);
 
@@ -340,17 +340,16 @@ void amd_iommu_msi_msg_update_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
     struct pci_dev *pdev = msi_desc->dev;
-    struct amd_iommu *iommu = NULL;
+    int bdf = PCI_BDF2(pdev->bus, pdev->devfn);
+    struct amd_iommu *iommu;
 
     if ( !iommu_intremap )
         return;
 
-    iommu = find_iommu_for_device(pdev->seg, (pdev->bus << 8) | pdev->devfn);
-
+    iommu = find_iommu_for_device(pdev->seg, bdf);
     if ( !iommu )
     {
-        AMD_IOMMU_DEBUG("Fail to find iommu for MSI device id = 0x%x\n",
-                       (pdev->bus << 8) | pdev->devfn);
+        AMD_IOMMU_DEBUG("Fail to find iommu for MSI device id = %#x\n", bdf);
         return;
     }
 
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -597,7 +597,7 @@ static int update_paging_mode(struct dom
         /* Update device table entries using new root table and paging mode */
         for_each_pdev( d, pdev )
         {
-            bdf = (pdev->bus << 8) | pdev->devfn;
+            bdf = PCI_BDF2(pdev->bus, pdev->devfn);
             req_id = get_dma_requestor_id(pdev->seg, bdf);
             iommu = find_iommu_for_device(pdev->seg, bdf);
             if ( !iommu )
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
@@ -373,7 +373,7 @@ static int reassign_device( struct domai
 static int amd_iommu_assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
 {
     struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
-    int bdf = (bus << 8) | devfn;
+    int bdf = PCI_BDF2(bus, devfn);
     int req_id = get_dma_requestor_id(seg, bdf);
 
     if ( ivrs_mappings[req_id].unity_map_enable )
@@ -499,12 +499,9 @@ static int amd_iommu_remove_device(struc
 
 static int amd_iommu_group_id(u16 seg, u8 bus, u8 devfn)
 {
-    int rt;
-    int bdf = (bus << 8) | devfn;
-    rt = ( bdf < ivrs_bdf_entries ) ?
-        get_dma_requestor_id(seg, bdf) :
-        bdf;
-    return rt;
+    int bdf = PCI_BDF2(bus, devfn);
+
+    return (bdf < ivrs_bdf_entries) ? get_dma_requestor_id(seg, bdf) : bdf;
 }
 
 #include <asm/io_apic.h>
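
As a quick sanity check of the conversion (a hypothetical standalone snippet, not
part of the patch), composing a BDF with PCI_BDF2() and taking it apart again with
PCI_BUS()/PCI_SLOT()/PCI_FUNC() matches what the old open-coded expressions
produced; the macro bodies below simply repeat the approximate definitions
sketched above:

    /* Hypothetical standalone check; macro bodies approximate xen/include/xen/pci.h. */
    #include <assert.h>
    #include <stdint.h>

    #define PCI_BUS(bdf)    (((bdf) >> 8) & 0xff)
    #define PCI_SLOT(bdf)   (((bdf) >> 3) & 0x1f)
    #define PCI_FUNC(bdf)   ((bdf) & 0x07)
    #define PCI_BDF2(b, df) ((((b) & 0xff) << 8) | ((df) & 0xff))

    int main(void)
    {
        uint8_t bus = 0x3a, devfn = 0x45;       /* slot 8, function 5 */
        uint16_t bdf = PCI_BDF2(bus, devfn);    /* same as (bus << 8) | devfn */

        assert(PCI_BUS(bdf)  == bus);
        assert(PCI_SLOT(bdf) == (devfn >> 3));  /* old code: PCI_SLOT(bdf & 0xff) */
        assert(PCI_FUNC(bdf) == (devfn & 7));   /* old code: PCI_FUNC(bdf & 0xff) */
        return 0;
    }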



Attachment: amd-iommu-PCI-macros.patch
Description: Text document
