
Re: [RFC KERNEL PATCH v2 2/3] xen/pvh: Unmask irq for passthrough device in PVH dom0


  • To: Stefano Stabellini <sstabellini@xxxxxxxxxx>, "Chen, Jiqian" <Jiqian.Chen@xxxxxxx>
  • From: Juergen Gross <jgross@xxxxxxxx>
  • Date: Thu, 7 Dec 2023 07:43:48 +0100
  • Cc: Jan Beulich <jbeulich@xxxxxxxx>, Roger Pau Monné <roger.pau@xxxxxxxxxx>, Oleksandr Tyshchenko <oleksandr_tyshchenko@xxxxxxxx>, Thomas Gleixner <tglx@xxxxxxxxxxxxx>, Boris Ostrovsky <boris.ostrovsky@xxxxxxxxxx>, "Rafael J . Wysocki" <rafael@xxxxxxxxxx>, Len Brown <lenb@xxxxxxxxxx>, Bjorn Helgaas <bhelgaas@xxxxxxxxxx>, "xen-devel@xxxxxxxxxxxxxxxxxxxx" <xen-devel@xxxxxxxxxxxxxxxxxxxx>, "linux-kernel@xxxxxxxxxxxxxxx" <linux-kernel@xxxxxxxxxxxxxxx>, "linux-acpi@xxxxxxxxxxxxxxx" <linux-acpi@xxxxxxxxxxxxxxx>, "Stabellini, Stefano" <stefano.stabellini@xxxxxxx>, "Deucher, Alexander" <Alexander.Deucher@xxxxxxx>, "Koenig, Christian" <Christian.Koenig@xxxxxxx>, "Hildebrand, Stewart" <Stewart.Hildebrand@xxxxxxx>, "Ragiadakou, Xenia" <Xenia.Ragiadakou@xxxxxxx>, "Huang, Honglei1" <Honglei1.Huang@xxxxxxx>, "Zhang, Julia" <Julia.Zhang@xxxxxxx>, "Huang, Ray" <Ray.Huang@xxxxxxx>
  • Delivery-date: Thu, 07 Dec 2023 06:44:01 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 07.12.23 03:18, Stefano Stabellini wrote:
> On Tue, 5 Dec 2023, Chen, Jiqian wrote:
>> When PVH dom0 enables a device, it will get the trigger and polarity from
>> ACPI (see acpi_pci_irq_enable).
>> I have a version of the patch which tried that way, see below:
>
> This approach looks much better. I think this patch is OKish. Juergen,
> what do you think?

The approach seems to be fine.


Juergen
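
For context, the trigger and polarity mentioned above come from the device's
ACPI _PRT entry. The sketch below is not part of the patch; it condenses the
relevant lookup from acpi_pci_irq_enable() in drivers/acpi/pci_irq.c, with
error handling and the boot-interrupt quirks omitted.

/*
 * Simplified sketch of how acpi_pci_irq_enable() derives the GSI,
 * trigger and polarity for a device's interrupt pin.
 */
static int sketch_lookup_gsi(struct pci_dev *dev)
{
	int trigger = ACPI_LEVEL_SENSITIVE;
	int polarity = ACPI_ACTIVE_LOW;		/* x86 default */
	struct acpi_prt_entry *entry;
	int gsi;

	entry = acpi_pci_irq_lookup(dev, dev->pin);
	if (!entry)
		return -EINVAL;

	if (entry->link)
		/* Interrupt is routed through a PCI interrupt link device. */
		gsi = acpi_pci_link_allocate_irq(entry->link, entry->index,
						 &trigger, &polarity, NULL);
	else
		/* Hardwired: the _PRT entry holds the GSI directly. */
		gsi = entry->index;

	return gsi;
}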



diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index ada3868c02c2..43e1bda9f946 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -1,6 +1,7 @@
  // SPDX-License-Identifier: GPL-2.0
  #include <linux/acpi.h>
  #include <linux/export.h>
+#include <linux/pci.h>

  #include <xen/hvc-console.h>

@@ -25,6 +26,127 @@
  bool __ro_after_init xen_pvh;
  EXPORT_SYMBOL_GPL(xen_pvh);

+typedef struct gsi_info {
+       int gsi;
+       int trigger;
+       int polarity;
+       int pirq;
+} gsi_info_t;
+
+struct acpi_prt_entry {
+       struct acpi_pci_id      id;
+       u8                      pin;
+       acpi_handle             link;
+       u32                     index;          /* GSI, or link _CRS index */
+};
+
+static int xen_pvh_get_gsi_info(struct pci_dev *dev,
+                               gsi_info_t *gsi_info)
+{
+       int gsi;
+       u8 pin = 0;
+       struct acpi_prt_entry *entry;
+       int trigger = ACPI_LEVEL_SENSITIVE;
+       int polarity = acpi_irq_model == ACPI_IRQ_MODEL_GIC ?
+                                     ACPI_ACTIVE_HIGH : ACPI_ACTIVE_LOW;
+
+       if (dev)
+               pin = dev->pin;
+       if (!pin) {
+               xen_raw_printk("No interrupt pin configured\n");
+               return -EINVAL;
+       }
+
+       entry = acpi_pci_irq_lookup(dev, pin);
+       if (entry) {
+               if (entry->link)
+                       gsi = acpi_pci_link_allocate_irq(entry->link,
+                                                        entry->index,
+                                                        &trigger, &polarity,
+                                                        NULL);
+               else
+                       gsi = entry->index;
+       } else
+               return -EINVAL;
+
+       gsi_info->gsi = gsi;
+       gsi_info->trigger = trigger;
+       gsi_info->polarity = polarity;
+
+       return 0;
+}
+
+static int xen_pvh_map_pirq(gsi_info_t *gsi_info)
+{
+       struct physdev_map_pirq map_irq;
+       int ret;
+
+       map_irq.domid = DOMID_SELF;
+       map_irq.type = MAP_PIRQ_TYPE_GSI;
+       map_irq.index = gsi_info->gsi;
+       map_irq.pirq = gsi_info->gsi;
+
+       ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
+       gsi_info->pirq = map_irq.pirq;
+
+       return ret;
+}
+
+static int xen_pvh_unmap_pirq(gsi_info_t *gsi_info)
+{
+       struct physdev_unmap_pirq unmap_irq;
+
+       unmap_irq.domid = DOMID_SELF;
+       unmap_irq.pirq = gsi_info->pirq;
+
+       return HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
+}
+
+static int xen_pvh_setup_gsi(gsi_info_t *gsi_info)
+{
+       struct physdev_setup_gsi setup_gsi;
+
+       setup_gsi.gsi = gsi_info->gsi;
+       setup_gsi.triggering = (gsi_info->trigger == ACPI_EDGE_SENSITIVE ? 0 : 1);
+       setup_gsi.polarity = (gsi_info->polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
+
+       return HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
+}
+
+int xen_pvh_passthrough_gsi(struct pci_dev *dev)
+{
+       int ret;
+       gsi_info_t gsi_info;
+
+       if (!dev) {
+               return -EINVAL;
+       }
+
+       ret = xen_pvh_get_gsi_info(dev, &gsi_info);
+       if (ret) {
+               xen_raw_printk("Fail to get gsi info!\n");
+               return ret;
+       }
+
+       ret = xen_pvh_map_pirq(&gsi_info);
+       if (ret) {
+               xen_raw_printk("Fail to map pirq for gsi (%d)!\n", 
gsi_info.gsi);
+               return ret;
+       }
+
+       ret = xen_pvh_setup_gsi(&gsi_info);
+       if (ret == -EEXIST) {
+               ret = 0;
+               xen_raw_printk("Already setup the GSI :%u\n", gsi_info.gsi);
+       } else if (ret) {
+               xen_raw_printk("Fail to setup gsi (%d)!\n", gsi_info.gsi);
+               xen_pvh_unmap_pirq(&gsi_info);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xen_pvh_passthrough_gsi);
+
  void __init xen_pvh_init(struct boot_params *boot_params)
  {
         u32 msr;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index ff30ceca2203..630fe0a34bc6 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -288,7 +288,7 @@ static int acpi_reroute_boot_interrupt(struct pci_dev *dev,
  }
  #endif /* CONFIG_X86_IO_APIC */

-static struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin)
  {
         struct acpi_prt_entry *entry = NULL;
         struct pci_dev *bridge;
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index e34b623e4b41..1abd4dad6f40 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -20,6 +20,7 @@
  #include <linux/atomic.h>
  #include <xen/events.h>
  #include <xen/pci.h>
+#include <xen/acpi.h>
  #include <xen/xen.h>
  #include <asm/xen/hypervisor.h>
  #include <xen/interface/physdev.h>
@@ -399,6 +400,12 @@ static int pcistub_init_device(struct pci_dev *dev)
         if (err)
                 goto config_release;

+       if (xen_initial_domain() && xen_pvh_domain()) {
+               err = xen_pvh_passthrough_gsi(dev);
+               if (err)
+                       goto config_release;
+       }
+
         if (dev->msix_cap) {
                 struct physdev_pci_device ppdev = {
                         .seg = pci_domain_nr(dev->bus),
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 641dc4843987..368d56ba2c5e 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -375,6 +375,7 @@ void acpi_unregister_gsi (u32 gsi);

  struct pci_dev;

+struct acpi_prt_entry *acpi_pci_irq_lookup(struct pci_dev *dev, int pin);
  int acpi_pci_irq_enable (struct pci_dev *dev);
  void acpi_penalize_isa_irq(int irq, int active);
  bool acpi_isa_irq_available(int irq);
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index b1e11863144d..ce7f5554f88e 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -67,6 +67,7 @@ static inline void xen_acpi_sleep_register(void)
                 acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
         }
  }
+int xen_pvh_passthrough_gsi(struct pci_dev *dev);
  #else
  static inline void xen_acpi_sleep_register(void)
  {
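
As a summary of the flow the patch introduces, here is a minimal sketch (not
part of the patch) of the hypercall sequence xen_pvh_passthrough_gsi()
performs for one device. The GSI, trigger and polarity values are
placeholders for what the ACPI lookup would return.

/*
 * Illustrative only: map a pirq for a GSI, then register the GSI's
 * trigger/polarity with Xen so the interrupt can be unmasked.
 */
static int sketch_passthrough_flow(void)
{
	struct physdev_map_pirq map_irq = {
		.domid = DOMID_SELF,
		.type  = MAP_PIRQ_TYPE_GSI,
		.index = 28,		/* placeholder GSI */
		.pirq  = 28,		/* request an identity mapping */
	};
	struct physdev_setup_gsi setup_gsi = {
		.gsi        = 28,
		.triggering = 1,	/* 0 = edge, 1 = level */
		.polarity   = 1,	/* 0 = active high, 1 = active low */
	};
	int ret;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
	if (ret)
		return ret;

	ret = HYPERVISOR_physdev_op(PHYSDEVOP_setup_gsi, &setup_gsi);
	if (ret == -EEXIST)
		ret = 0;	/* GSI already configured: not an error */
	return ret;
}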



--
Best regards,
Jiqian Chen.


Attachment: OpenPGP_0xB0DE9DD628BF132F.asc
Description: OpenPGP public key

Attachment: OpenPGP_signature.asc
Description: OpenPGP digital signature


 

