
[Xen-devel] [PATCH v4 04/12] AMD/IOMMU: pass IOMMU to {get, free, update}_intremap_entry()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <JBeulich@xxxxxxxx>
  • Date: Thu, 25 Jul 2019 13:30:39 +0000
  • Accept-language: en-US
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Brian Woods <brian.woods@xxxxxxx>, Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
  • Delivery-date: Thu, 25 Jul 2019 13:33:47 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

The functions will want to know IOMMU properties (specifically the IRTE
size) subsequently.
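
For context, a minimal sketch of the intended consumer (names here are
hypothetical, standing in for however a later patch in this series derives
the IRTE format from IOMMU properties; this patch only changes signatures):

    /* Illustrative only: iommu_uses_128bit_irte() is a made-up stand-in
     * for the real per-IOMMU format check added later in the series. */
    static inline unsigned int irte_size(const struct amd_iommu *iommu)
    {
        return iommu_uses_128bit_irte(iommu) ? 16 : 4; /* bytes per IRTE */
    }

    /* ... allowing get_intremap_entry() to index by the right stride
     * instead of hard-coding the 32-bit (4-byte) entry layout: */
    table.ptr += index * irte_size(iommu);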

Rather than introducing a second error path bogusly returning -E... from
amd_iommu_read_ioapic_from_ire(), also change the existing one to follow
VT-d in returning the raw (untranslated) IO-APIC RTE.
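
Condensed sketch of the resulting convention (not the literal code; see the
amd_iommu_read_ioapic_from_ire() hunks below): every failure path now hands
back the raw RTE rather than pressing -EINVAL into an unsigned int return:

    unsigned int val = __io_apic_read(apic, reg); /* raw, untranslated RTE */

    if ( idx == MAX_IO_APICS )  /* IO-APIC unknown to interrupt remapping */
        return val;             /* was: return -EINVAL; */

    if ( !iommu )               /* new error path added by this patch */
        return val;             /* same convention, no bogus -E... value */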

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Acked-by: Brian Woods <brian.woods@xxxxxxx>
---
v3: New.

--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -122,11 +122,11 @@ static unsigned int alloc_intremap_entry
      return slot;
  }
  
-static union irte_ptr get_intremap_entry(unsigned int seg, unsigned int bdf,
-                                         unsigned int index)
+static union irte_ptr get_intremap_entry(const struct amd_iommu *iommu,
+                                         unsigned int bdf, unsigned int index)
  {
      union irte_ptr table = {
-        .ptr = get_ivrs_mappings(seg)[bdf].intremap_table
+        .ptr = get_ivrs_mappings(iommu->seg)[bdf].intremap_table
      };
  
      ASSERT(table.ptr && (index < INTREMAP_ENTRIES));
@@ -136,18 +136,19 @@ static union irte_ptr get_intremap_entry
      return table;
  }
  
-static void free_intremap_entry(unsigned int seg, unsigned int bdf,
-                                unsigned int index)
+static void free_intremap_entry(const struct amd_iommu *iommu,
+                                unsigned int bdf, unsigned int index)
  {
-    union irte_ptr entry = get_intremap_entry(seg, bdf, index);
+    union irte_ptr entry = get_intremap_entry(iommu, bdf, index);
  
      ACCESS_ONCE(entry.ptr32->raw) = 0;
  
-    __clear_bit(index, get_ivrs_mappings(seg)[bdf].intremap_inuse);
+    __clear_bit(index, get_ivrs_mappings(iommu->seg)[bdf].intremap_inuse);
  }
  
-static void update_intremap_entry(union irte_ptr entry, unsigned int vector,
-                                  unsigned int int_type,
+static void update_intremap_entry(const struct amd_iommu *iommu,
+                                  union irte_ptr entry,
+                                  unsigned int vector, unsigned int int_type,
                                    unsigned int dest_mode, unsigned int dest)
  {
      union irte32 irte = {
@@ -212,7 +213,7 @@ static int update_intremap_entry_from_io
          lo_update = 1;
      }
  
-    entry = get_intremap_entry(iommu->seg, req_id, offset);
+    entry = get_intremap_entry(iommu, req_id, offset);
      if ( !lo_update )
      {
          /*
@@ -223,7 +224,7 @@ static int update_intremap_entry_from_io
          vector = entry.ptr32->flds.vector;
          delivery_mode = entry.ptr32->flds.int_type;
      }
-    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    update_intremap_entry(iommu, entry, vector, delivery_mode, dest_mode, dest);
  
      spin_unlock_irqrestore(lock, flags);
  
@@ -288,8 +289,8 @@ int __init amd_iommu_setup_ioapic_remapp
              spin_lock_irqsave(lock, flags);
              offset = alloc_intremap_entry(seg, req_id, 1);
              BUG_ON(offset >= INTREMAP_ENTRIES);
-            entry = get_intremap_entry(iommu->seg, req_id, offset);
-            update_intremap_entry(entry, vector,
+            entry = get_intremap_entry(iommu, req_id, offset);
+            update_intremap_entry(iommu, entry, vector,
                                    delivery_mode, dest_mode, dest);
              spin_unlock_irqrestore(lock, flags);
  
@@ -413,7 +414,7 @@ unsigned int amd_iommu_read_ioapic_from_
  
      idx = ioapic_id_to_index(IO_APIC_ID(apic));
      if ( idx == MAX_IO_APICS )
-        return -EINVAL;
+        return val;
  
      offset = ioapic_sbdf[idx].pin_2_idx[pin];
  
@@ -422,9 +423,13 @@ unsigned int amd_iommu_read_ioapic_from_
          u16 bdf = ioapic_sbdf[idx].bdf;
          u16 seg = ioapic_sbdf[idx].seg;
          u16 req_id = get_intremap_requestor_id(seg, bdf);
-        union irte_ptr entry = get_intremap_entry(seg, req_id, offset);
+        const struct amd_iommu *iommu = find_iommu_for_device(seg, bdf);
+        union irte_ptr entry;
  
+        if ( !iommu )
+            return val;
          ASSERT(offset == (val & (INTREMAP_ENTRIES - 1)));
+        entry = get_intremap_entry(iommu, req_id, offset);
          val &= ~(INTREMAP_ENTRIES - 1);
          val |= MASK_INSR(entry.ptr32->flds.int_type,
                           IO_APIC_REDIR_DELIV_MODE_MASK);
@@ -454,7 +459,7 @@ static int update_intremap_entry_from_ms
          lock = get_intremap_lock(iommu->seg, req_id);
          spin_lock_irqsave(lock, flags);
          for ( i = 0; i < nr; ++i )
-            free_intremap_entry(iommu->seg, req_id, *remap_index + i);
+            free_intremap_entry(iommu, req_id, *remap_index + i);
          spin_unlock_irqrestore(lock, flags);
          goto done;
      }
@@ -479,8 +484,8 @@ static int update_intremap_entry_from_ms
          *remap_index = offset;
      }
  
-    entry = get_intremap_entry(iommu->seg, req_id, offset);
-    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    entry = get_intremap_entry(iommu, req_id, offset);
+    update_intremap_entry(iommu, entry, vector, delivery_mode, dest_mode, dest);
      spin_unlock_irqrestore(lock, flags);
  
      *data = (msg->data & ~(INTREMAP_ENTRIES - 1)) | offset;
@@ -594,12 +599,13 @@ void amd_iommu_read_msi_from_ire(
      const struct pci_dev *pdev = msi_desc->dev;
      u16 bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
      u16 seg = pdev ? pdev->seg : hpet_sbdf.seg;
+    const struct amd_iommu *iommu = _find_iommu_for_device(seg, bdf);
      union irte_ptr entry;
  
-    if ( IS_ERR_OR_NULL(_find_iommu_for_device(seg, bdf)) )
+    if ( IS_ERR_OR_NULL(iommu) )
          return;
  
-    entry = get_intremap_entry(seg, get_dma_requestor_id(seg, bdf), offset);
+    entry = get_intremap_entry(iommu, get_dma_requestor_id(seg, bdf), offset);
  
      if ( msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
      {
