[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-devel] [PATCH v3 07/14] AMD/IOMMU: pass IOMMU to {get,free,update}_intremap_entry()


  • To: "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • From: Jan Beulich <JBeulich@xxxxxxxx>
  • Date: Tue, 16 Jul 2019 16:37:51 +0000
  • Accept-language: en-US
  • Arc-authentication-results: i=1; mx.microsoft.com 1;spf=pass smtp.mailfrom=suse.com;dmarc=pass action=none header.from=suse.com;dkim=pass header.d=suse.com;arc=none
  • Arc-message-signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=microsoft.com; s=arcselector9901; h=From:Date:Subject:Message-ID:Content-Type:MIME-Version:X-MS-Exchange-SenderADCheck; bh=pbmVG+xtC/ndA7O3oJS9moz976STYe8Tu1Ukyrkn5Io=; b=m7WLTDx1OqelvvVXvvleOfLYiQcuWgtWoAQzdiJrmk0QJgxA2A5oOaue2qYMbO4XPEuF2qXArPN5HVnJP84jWpdFr8q7afOuZfjJcu8J8cn+mE1wudfErktgTSKRnQjDCHWvOsVMkJqYj7sGAiySAeXlP8rQ7JWzEFnAtLuo2rac6u3sEa1sDlTqD04+m/0u6RTCQYwUNhSRL82ENBu8PY0bHHNH+1rEqi7Aoynx/9apP2egMuPz8J5iiuKnK2CCJhLoYfQnwILMIJmRDy3GbquC3vHHdx3IgvR8Mepxhq6rCjcSWNPj08P8xGHp7EN7ydr6A1/1t+iRegxiaiC65g==
  • Arc-seal: i=1; a=rsa-sha256; s=arcselector9901; d=microsoft.com; cv=none; b=WrCpBH91lv+oukmhUXTGXMMr/D22A36kJbjkqMyoLgSsDLi4W1NeIuTFYnP88VYFkenmByQy6VCXLmixCyaMKe8HkocgFEBypDeN2KAx3iUZx7lmXAkfyqC14HKEbsm9fvb4SC5YrEkt/+XPwulZFZr4P9rHY5+Qe99gZ6nxp9LFKLVmPwysF1uSOsCDvoji5J1R/6qOtpEzZ1g4IDOj0KbI+HLvkqkTPZP1SbfJsQlHxhvm4tIpmXfIXbVrlVoZ+NGoEXqHr8U8hUVAqxKq9AiKrSH+80IzsTbVDQ/EnStGdRb7JEgM0IiUxOUEQGEyXz/5VFgRLitH8GR3Oc0qxw==
  • Authentication-results: spf=none (sender IP is ) smtp.mailfrom=JBeulich@xxxxxxxx;
  • Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>, Brian Woods <brian.woods@xxxxxxx>, Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
  • Delivery-date: Tue, 16 Jul 2019 16:40:05 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>
  • Thread-index: AQHVO/TPRJnsmnG7IEW/OeCiigfJ2A==
  • Thread-topic: [PATCH v3 07/14] AMD/IOMMU: pass IOMMU to {get,free,update}_intremap_entry()

The functions will want to know IOMMU properties (specifically the IRTE
size) subsequently.

Rather than introducing a second error path bogusly returning -E... from
amd_iommu_read_ioapic_from_ire(), also change the existing one to follow
VT-d in returning the raw (untranslated) IO-APIC RTE.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v3: New.

--- a/xen/drivers/passthrough/amd/iommu_intr.c
+++ b/xen/drivers/passthrough/amd/iommu_intr.c
@@ -123,11 +123,11 @@ static unsigned int alloc_intremap_entry
      return slot;
  }
  
-static union irte_ptr get_intremap_entry(unsigned int seg, unsigned int bdf,
-                                         unsigned int index)
+static union irte_ptr get_intremap_entry(const struct amd_iommu *iommu,
+                                         unsigned int bdf, unsigned int index)
  {
      union irte_ptr table = {
-        .ptr = get_ivrs_mappings(seg)[bdf].intremap_table
+        .ptr = get_ivrs_mappings(iommu->seg)[bdf].intremap_table
      };
  
      ASSERT(table.ptr && (index < INTREMAP_ENTRIES));
@@ -137,18 +137,19 @@ static union irte_ptr get_intremap_entry
      return table;
  }
  
-static void free_intremap_entry(unsigned int seg, unsigned int bdf,
-                                unsigned int index)
+static void free_intremap_entry(const struct amd_iommu *iommu,
+                                unsigned int bdf, unsigned int index)
  {
-    union irte_ptr entry = get_intremap_entry(seg, bdf, index);
+    union irte_ptr entry = get_intremap_entry(iommu, bdf, index);
  
      ACCESS_ONCE(entry.ptr32->raw[0]) = 0;
  
-    __clear_bit(index, get_ivrs_mappings(seg)[bdf].intremap_inuse);
+    __clear_bit(index, get_ivrs_mappings(iommu->seg)[bdf].intremap_inuse);
  }
  
-static void update_intremap_entry(union irte_ptr entry, unsigned int vector,
-                                  unsigned int int_type,
+static void update_intremap_entry(const struct amd_iommu *iommu,
+                                  union irte_ptr entry,
+                                  unsigned int vector, unsigned int int_type,
                                    unsigned int dest_mode, unsigned int dest)
  {
      struct irte_basic basic = {
@@ -212,7 +213,7 @@ static int update_intremap_entry_from_io
          lo_update = 1;
      }
  
-    entry = get_intremap_entry(iommu->seg, req_id, offset);
+    entry = get_intremap_entry(iommu, req_id, offset);
      if ( !lo_update )
      {
          /*
@@ -223,7 +224,7 @@ static int update_intremap_entry_from_io
          vector = entry.ptr32->basic.vector;
          delivery_mode = entry.ptr32->basic.int_type;
      }
-    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    update_intremap_entry(iommu, entry, vector, delivery_mode, dest_mode, dest);
  
      spin_unlock_irqrestore(lock, flags);
  
@@ -288,8 +289,8 @@ int __init amd_iommu_setup_ioapic_remapp
              spin_lock_irqsave(lock, flags);
              offset = alloc_intremap_entry(seg, req_id, 1);
              BUG_ON(offset >= INTREMAP_ENTRIES);
-            entry = get_intremap_entry(iommu->seg, req_id, offset);
-            update_intremap_entry(entry, vector,
+            entry = get_intremap_entry(iommu, req_id, offset);
+            update_intremap_entry(iommu, entry, vector,
                                    delivery_mode, dest_mode, dest);
              spin_unlock_irqrestore(lock, flags);
  
@@ -413,7 +414,7 @@ unsigned int amd_iommu_read_ioapic_from_
  
      idx = ioapic_id_to_index(IO_APIC_ID(apic));
      if ( idx == MAX_IO_APICS )
-        return -EINVAL;
+        return val;
  
      offset = ioapic_sbdf[idx].pin_2_idx[pin];
  
@@ -422,9 +423,13 @@ unsigned int amd_iommu_read_ioapic_from_
          u16 bdf = ioapic_sbdf[idx].bdf;
          u16 seg = ioapic_sbdf[idx].seg;
          u16 req_id = get_intremap_requestor_id(seg, bdf);
-        union irte_ptr entry = get_intremap_entry(seg, req_id, offset);
+        const struct amd_iommu *iommu = find_iommu_for_device(seg, bdf);
+        union irte_ptr entry;
  
+        if ( !iommu )
+            return val;
          ASSERT(offset == (val & (INTREMAP_ENTRIES - 1)));
+        entry = get_intremap_entry(iommu, req_id, offset);
          val &= ~(INTREMAP_ENTRIES - 1);
          val |= MASK_INSR(entry.ptr32->basic.int_type,
                           IO_APIC_REDIR_DELIV_MODE_MASK);
@@ -454,7 +459,7 @@ static int update_intremap_entry_from_ms
          lock = get_intremap_lock(iommu->seg, req_id);
          spin_lock_irqsave(lock, flags);
          for ( i = 0; i < nr; ++i )
-            free_intremap_entry(iommu->seg, req_id, *remap_index + i);
+            free_intremap_entry(iommu, req_id, *remap_index + i);
          spin_unlock_irqrestore(lock, flags);
          goto done;
      }
@@ -479,8 +484,8 @@ static int update_intremap_entry_from_ms
          *remap_index = offset;
      }
  
-    entry = get_intremap_entry(iommu->seg, req_id, offset);
-    update_intremap_entry(entry, vector, delivery_mode, dest_mode, dest);
+    entry = get_intremap_entry(iommu, req_id, offset);
+    update_intremap_entry(iommu, entry, vector, delivery_mode, dest_mode, dest);
      spin_unlock_irqrestore(lock, flags);
  
      *data = (msg->data & ~(INTREMAP_ENTRIES - 1)) | offset;
@@ -594,12 +599,13 @@ void amd_iommu_read_msi_from_ire(
      const struct pci_dev *pdev = msi_desc->dev;
      u16 bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
      u16 seg = pdev ? pdev->seg : hpet_sbdf.seg;
+    const struct amd_iommu *iommu = _find_iommu_for_device(seg, bdf);
      union irte_ptr entry;
  
-    if ( IS_ERR_OR_NULL(_find_iommu_for_device(seg, bdf)) )
+    if ( IS_ERR_OR_NULL(iommu) )
          return;
  
-    entry = get_intremap_entry(seg, get_dma_requestor_id(seg, bdf), offset);
+    entry = get_intremap_entry(iommu, get_dma_requestor_id(seg, bdf), offset);
  
      if ( msi_desc->msi_attrib.type == PCI_CAP_ID_MSI )
      {

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.