[Xen-devel] [PATCH] linux-2.6.18/MSI-X: Avoid unconditional BUG() in DomU
msi_remove_pci_irq_vectors(), by unconditionally (i.e. even in a DomU) calling
msi_unmap_pirq(), is guaranteed to BUG() in msi_get_dev_owner(). Adjust the
function to call evtchn_map_pirq() instead in a DomU (just like
pci_disable_msix() does).

While looking into this I also realized that the CONFIG_XEN_PCIDEV_FRONTEND
conditionals are misplaced: since on x86-64 it is possible to build with this
option disabled, execution would in that case flow into Dom0 code even on a
DomU. Hence adjust their placement at the same time.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/drivers/pci/msi-xen.c
+++ b/drivers/pci/msi-xen.c
@@ -547,9 +547,8 @@ int pci_enable_msi(struct pci_dev* dev
 	if (status < 0)
 		return status;
 
+	if (!is_initial_xendomain()) {
 #ifdef CONFIG_XEN_PCIDEV_FRONTEND
-	if (!is_initial_xendomain())
-	{
 		int ret;
 
 		temp = dev->irq;
@@ -563,8 +562,10 @@ int pci_enable_msi(struct pci_dev* dev
 		msi_dev_entry->default_irq = temp;
 
 		return ret;
-	}
+#else
+		return -EOPNOTSUPP;
 #endif
+	}
 
 	temp = dev->irq;
 
@@ -592,8 +593,8 @@ void pci_disable_msi(struct pci_dev* dev
 	if (!dev)
 		return;
 
-#ifdef CONFIG_XEN_PCIDEV_FRONTEND
 	if (!is_initial_xendomain()) {
+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
 		if (!(dev->msi_enabled)) {
 			printk(KERN_INFO "PCI: %s: Device did not enabled MSI.\n",
 			       pci_name(dev));
@@ -603,9 +604,9 @@ void pci_disable_msi(struct pci_dev* dev
 		pci_frontend_disable_msi(dev);
 		dev->irq = msi_dev_entry->default_irq;
 		dev->msi_enabled = 0;
+#endif
 		return;
 	}
-#endif
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
 	if (!pos)
@@ -663,8 +664,8 @@ void pci_disable_msix(struct pci_dev *de
 	if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
 		return -EINVAL;
 
-#ifdef CONFIG_XEN_PCIDEV_FRONTEND
 	if (!is_initial_xendomain()) {
+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
 		struct msi_pirq_entry *pirq_entry;
 		int ret, irq;
 
@@ -696,9 +697,11 @@ int pci_enable_msix(struct pci_dev* dev,
 				attach_pirq_entry(irq, entries[i].entry, msi_dev_entry);
 			entries[i].vector = irq;
 		}
-        	return 0;
-	}
+		return 0;
+#else
+		return -EOPNOTSUPP;
 #endif
+	}
 
 	status = msi_init();
 	if (status < 0)
@@ -741,8 +744,8 @@ void pci_disable_msix(struct pci_dev *de
 		return;
 	}
 
-#ifdef CONFIG_XEN_PCIDEV_FRONTEND
 	if (!is_initial_xendomain()) {
+#ifdef CONFIG_XEN_PCIDEV_FRONTEND
 		struct msi_dev_list *msi_dev_entry;
 		struct msi_pirq_entry *pirq_entry, *tmp;
 
@@ -758,9 +761,9 @@ void pci_msix_shutdown(struct pci_dev *d
 
 		dev->irq = msi_dev_entry->default_irq;
 		dev->msix_enabled = 0;
+#endif
 		return;
 	}
-#endif
 
 	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
 	if (!pos)
@@ -800,7 +791,10 @@ void msi_remove_pci_irq_vectors(struct p
 	if (!list_empty(&msi_dev_entry->pirq_list_head))
 		list_for_each_entry_safe(pirq_entry, tmp,
 					 &msi_dev_entry->pirq_list_head, list) {
-			msi_unmap_pirq(dev, pirq_entry->pirq);
+			if (is_initial_xendomain())
+				msi_unmap_pirq(dev, pirq_entry->pirq);
+			else
+				evtchn_map_pirq(pirq_entry->pirq, 0);
 			list_del(&pirq_entry->list);
 			kfree(pirq_entry);
 		}
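
To illustrate the conditional-placement point outside the driver context, here
is a minimal, self-contained sketch (stubbed helper names, not the real
msi-xen.c code) of the guard structure pci_enable_msi() ends up with after
this patch: the runtime is_initial_xendomain() check sits outside the
CONFIG_XEN_PCIDEV_FRONTEND block, so a DomU kernel built without the frontend
fails the MSI setup cleanly instead of falling through into the Dom0-only path.

	/*
	 * Not the driver code itself - a standalone sketch with stubbed
	 * names that mimics the rearranged guard structure.
	 */
	#include <stdio.h>

	#define EOPNOTSUPP 95

	/* Stubs standing in for the real helpers; pretend we run in a DomU. */
	static int is_initial_xendomain(void) { return 0; }
	static int pci_frontend_enable_msi_stub(void) { return 0; }
	static int dom0_msi_setup_stub(void)
	{
		printf("Dom0-only MSI setup\n");
		return 0;
	}

	static int enable_msi_like(void)
	{
		if (!is_initial_xendomain()) {	/* runtime check, compiled in regardless of config */
	#ifdef CONFIG_XEN_PCIDEV_FRONTEND
			return pci_frontend_enable_msi_stub();	/* DomU with pcifront */
	#else
			return -EOPNOTSUPP;	/* DomU without pcifront: fail cleanly */
	#endif
		}
		return dom0_msi_setup_stub();	/* Dom0 path */
	}

	int main(void)
	{
		printf("enable_msi_like() = %d\n", enable_msi_like());
		return 0;
	}

With the old placement (the runtime check inside the #ifdef), building with
CONFIG_XEN_PCIDEV_FRONTEND disabled would compile the whole DomU branch away
and continue straight into the Dom0-only setup, which is exactly the flow the
patch description objects to.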