Re: [Xen-devel] [PATCH 1/1] x86/AMD: Fix setup ssss:bb:dd:f for d0 failed
On 07/08/13 03:40, suravee.suthikulpanit@xxxxxxx wrote:
> From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
>
> The host bridge device (i.e. 0x18 for AMD) does not
> require IOMMU, and therefore is not included in the IVRS.
> The current logic tries to map all PCI devices to an IOMMU.
> In this case, "xl dmesg" shows the following message on an AMD system.

Can you elaborate on what you mean by "does not require IOMMU"?

How does anything behind the host bridges get translated if they are in
the path of the IOMMU?

~Andrew

>
> (XEN) setup 0000:00:18.0 for d0 failed (-19)
> (XEN) setup 0000:00:18.1 for d0 failed (-19)
> (XEN) setup 0000:00:18.2 for d0 failed (-19)
> (XEN) setup 0000:00:18.3 for d0 failed (-19)
> (XEN) setup 0000:00:18.4 for d0 failed (-19)
> (XEN) setup 0000:00:18.5 for d0 failed (-19)
>
> This patch adds a new device type (DEV_TYPE_PCI_HOST_BRIDGE), which
> corresponds to PCI class code 0x06 and sub-class 0x00, and uses this
> new type as a filter when trying to map devices to an IOMMU.
>
> Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
> ---
>  xen/drivers/passthrough/amd/pci_amd_iommu.c | 20 +++++++++++++----
>  xen/drivers/passthrough/pci.c               | 31 ++++++++++++++++-----------
>  xen/drivers/passthrough/vtd/iommu.c         |  2 ++
>  xen/include/xen/pci.h                       |  1 +
>  4 files changed, 38 insertions(+), 16 deletions(-)
>
> diff --git a/xen/drivers/passthrough/amd/pci_amd_iommu.c b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> index 9684ae8..0bb954a 100644
> --- a/xen/drivers/passthrough/amd/pci_amd_iommu.c
> +++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c
> @@ -175,10 +175,22 @@ static int __init amd_iommu_setup_dom0_device(u8 devfn, struct pci_dev *pdev)
>
>      if ( unlikely(!iommu) )
>      {
> -        AMD_IOMMU_DEBUG("No iommu for device %04x:%02x:%02x.%u\n",
> -                        pdev->seg, pdev->bus,
> -                        PCI_SLOT(devfn), PCI_FUNC(devfn));
> -        return -ENODEV;
> +        /* Filter the bridge devices */
> +        if ( (pdev->type == DEV_TYPE_PCIe_BRIDGE)
> +             || (pdev->type == DEV_TYPE_PCIe2PCI_BRIDGE)
> +             || (pdev->type == DEV_TYPE_LEGACY_PCI_BRIDGE)
> +             || (pdev->type == DEV_TYPE_PCI_HOST_BRIDGE) )
> +        {
> +            AMD_IOMMU_DEBUG("Skipping device %04x:%02x:%02x.%u (type %x)\n",
> +                            pdev->seg, PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf),
> +                            pdev->type);
> +            return 0;
> +        } else {
> +            AMD_IOMMU_DEBUG("No iommu for device %04x:%02x:%02x.%u\n",
> +                            pdev->seg, pdev->bus,
> +                            PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
> +            return -ENODEV;
> +        }
>      }
>
>      amd_iommu_setup_domain_device(pdev->domain, iommu, devfn, pdev);
> diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
> index f2756c9..0b94fc4 100644
> --- a/xen/drivers/passthrough/pci.c
> +++ b/xen/drivers/passthrough/pci.c
> @@ -189,9 +189,6 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
>          u16 cap;
>          u8 sec_bus, sub_bus;
>
> -        case DEV_TYPE_PCIe_BRIDGE:
> -            break;
> -
>          case DEV_TYPE_PCIe2PCI_BRIDGE:
>          case DEV_TYPE_LEGACY_PCI_BRIDGE:
>              sec_bus = pci_conf_read8(pseg->nr, bus, PCI_SLOT(devfn),
> @@ -239,6 +236,8 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
>              break;
>
>          case DEV_TYPE_PCI:
> +        case DEV_TYPE_PCI_HOST_BRIDGE:
> +        case DEV_TYPE_PCIe_BRIDGE:
>              break;
>
>          default:
> @@ -691,35 +690,43 @@ void pci_release_devices(struct domain *d)
>      spin_unlock(&pcidevs_lock);
>  }
>
> -#define PCI_CLASS_BRIDGE_PCI 0x0604
> +#define PCI_CLASS_HOST_PCI_BRIDGE 0x0600
> +#define PCI_CLASS_PCI_PCI_BRIDGE 0x0604
>
>  enum pdev_type pdev_type(u16 seg, u8 bus, u8 devfn)
>  {
> -    u16 class_device, creg;
> +    u16 creg;
>      u8 d = PCI_SLOT(devfn), f = PCI_FUNC(devfn);
> -    int pos = pci_find_cap_offset(seg, bus, d, f, PCI_CAP_ID_EXP);
> +    u16 class_device = pci_conf_read16(seg, bus, d, f, PCI_CLASS_DEVICE);
> +    int cap_offset = pci_find_cap_offset(seg, bus, d, f, PCI_CAP_ID_EXP);
>
> -    class_device = pci_conf_read16(seg, bus, d, f, PCI_CLASS_DEVICE);
>      switch ( class_device )
>      {
> -    case PCI_CLASS_BRIDGE_PCI:
> -        if ( !pos )
> +    case PCI_CLASS_PCI_PCI_BRIDGE:
> +        if ( !cap_offset )
>              return DEV_TYPE_LEGACY_PCI_BRIDGE;
> -        creg = pci_conf_read16(seg, bus, d, f, pos + PCI_EXP_FLAGS);
> +
> +        creg = pci_conf_read16(seg, bus, d, f, cap_offset + PCI_EXP_FLAGS);
> +
>          switch ( (creg & PCI_EXP_FLAGS_TYPE) >> 4 )
>          {
>          case PCI_EXP_TYPE_PCI_BRIDGE:
>              return DEV_TYPE_PCIe2PCI_BRIDGE;
>          case PCI_EXP_TYPE_PCIE_BRIDGE:
>              return DEV_TYPE_PCI2PCIe_BRIDGE;
> +        default:
> +            return DEV_TYPE_PCIe_BRIDGE;
>          }
> -        return DEV_TYPE_PCIe_BRIDGE;
> +        break;
> +
> +    case PCI_CLASS_HOST_PCI_BRIDGE:
> +        return DEV_TYPE_PCI_HOST_BRIDGE;
>
>      case 0x0000: case 0xffff:
>          return DEV_TYPE_PCI_UNKNOWN;
>      }
>
> -    return pos ? DEV_TYPE_PCIe_ENDPOINT : DEV_TYPE_PCI;
> +    return cap_offset ? DEV_TYPE_PCIe_ENDPOINT : DEV_TYPE_PCI;
>  }
>
>  /*
> diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
> index 76f7b8e..046262c 100644
> --- a/xen/drivers/passthrough/vtd/iommu.c
> +++ b/xen/drivers/passthrough/vtd/iommu.c
> @@ -1448,6 +1448,7 @@ static int domain_context_mapping(
>          break;
>
>      case DEV_TYPE_PCI:
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
>          if ( iommu_verbose )
>              dprintk(VTDPREFIX, "d%d:PCI: map %04x:%02x:%02x.%u\n",
>                      domain->domain_id, seg, bus,
> @@ -1577,6 +1578,7 @@ static int domain_context_unmap(
>          break;
>
>      case DEV_TYPE_PCI:
> +    case DEV_TYPE_PCI_HOST_BRIDGE:
>          if ( iommu_verbose )
>              dprintk(VTDPREFIX, "d%d:PCI: unmap %04x:%02x:%02x.%u\n",
>                      domain->domain_id, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
> diff --git a/xen/include/xen/pci.h b/xen/include/xen/pci.h
> index ca72a99..2530491 100644
> --- a/xen/include/xen/pci.h
> +++ b/xen/include/xen/pci.h
> @@ -73,6 +73,7 @@ struct pci_dev {
>          DEV_TYPE_PCIe2PCI_BRIDGE,   // PCIe-to-PCI/PCIx bridge
>          DEV_TYPE_PCI2PCIe_BRIDGE,   // PCI/PCIx-to-PCIe bridge
>          DEV_TYPE_LEGACY_PCI_BRIDGE, // Legacy PCI bridge
> +        DEV_TYPE_PCI_HOST_BRIDGE,   // PCI Host bridge
>          DEV_TYPE_PCI,
>      } type;

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
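As context for the classification the patch introduces in pdev_type(): the class code read from config space distinguishes a host bridge (0x0600) from a PCI-to-PCI bridge (0x0604), and for the latter the device/port type field in the PCI Express capability refines which kind of bridge it is. Below is a minimal standalone sketch of that decision logic. The enum values mirror the patch, but the classify() helper and its parameters are illustrative only; the real code reads these fields from config space with pci_conf_read16() and pci_find_cap_offset() rather than taking them as arguments.

/* Standalone sketch: map the PCI class code and the PCIe capability's
 * device/port type onto the pdev_type values used by the patch.
 * classify() and its arguments are illustrative, not hypervisor code. */
#include <stdio.h>

enum pdev_type {
    DEV_TYPE_PCIe_ENDPOINT,
    DEV_TYPE_PCIe_BRIDGE,        /* PCIe root port / switch */
    DEV_TYPE_PCIe2PCI_BRIDGE,    /* PCIe-to-PCI/PCIx bridge */
    DEV_TYPE_PCI2PCIe_BRIDGE,    /* PCI/PCIx-to-PCIe bridge */
    DEV_TYPE_LEGACY_PCI_BRIDGE,  /* PCI bridge without a PCIe capability */
    DEV_TYPE_PCI_HOST_BRIDGE,    /* host bridge, class 0x0600 (new in patch) */
    DEV_TYPE_PCI,
};

#define PCI_CLASS_HOST_PCI_BRIDGE 0x0600
#define PCI_CLASS_PCI_PCI_BRIDGE  0x0604

/* Device/port type values from the PCIe capability flags (bits 7:4). */
#define PCI_EXP_TYPE_PCI_BRIDGE   0x7   /* PCIe-to-PCI bridge */
#define PCI_EXP_TYPE_PCIE_BRIDGE  0x8   /* PCI-to-PCIe bridge */

static enum pdev_type classify(unsigned int class_device,
                               int has_pcie_cap, unsigned int exp_type)
{
    switch ( class_device )
    {
    case PCI_CLASS_PCI_PCI_BRIDGE:
        if ( !has_pcie_cap )
            return DEV_TYPE_LEGACY_PCI_BRIDGE;
        if ( exp_type == PCI_EXP_TYPE_PCI_BRIDGE )
            return DEV_TYPE_PCIe2PCI_BRIDGE;
        if ( exp_type == PCI_EXP_TYPE_PCIE_BRIDGE )
            return DEV_TYPE_PCI2PCIe_BRIDGE;
        return DEV_TYPE_PCIe_BRIDGE;

    case PCI_CLASS_HOST_PCI_BRIDGE:
        /* e.g. the AMD northbridge functions at 0000:00:18.x */
        return DEV_TYPE_PCI_HOST_BRIDGE;
    }

    return has_pcie_cap ? DEV_TYPE_PCIe_ENDPOINT : DEV_TYPE_PCI;
}

int main(void)
{
    /* 00:18.x reports class 0x0600 and no PCIe capability: with the patch
     * it is classified as a host bridge and skipped instead of failing. */
    printf("%d\n", classify(0x0600, 0, 0) == DEV_TYPE_PCI_HOST_BRIDGE);
    printf("%d\n", classify(0x0604, 1, PCI_EXP_TYPE_PCI_BRIDGE) ==
                   DEV_TYPE_PCIe2PCI_BRIDGE);
    return 0;
}

With this classification in place, amd_iommu_setup_dom0_device() can return 0 (skip) for bridge types that legitimately have no IVRS entry, while still returning -ENODEV for devices that should have been covered by an IOMMU.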