[Xen-devel] [PATCH 5/5] iommu / pci: re-implement XEN_DOMCTL_get_device_group...
... using the new iommu_group infrastructure.

Because 'sibling' devices are now members of the same iommu_group,
implement the domctl by looking up the relevant iommu_group and walking
the membership list.

Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
---
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/drivers/passthrough/iommu.c | 65 +++++++++++++++++++++++++++++++++++++++++
 xen/drivers/passthrough/pci.c   | 47 -----------------------------
 xen/include/xen/iommu.h         |  2 ++
 3 files changed, 67 insertions(+), 47 deletions(-)

diff --git a/xen/drivers/passthrough/iommu.c b/xen/drivers/passthrough/iommu.c
index 11319fbaae..49140c652e 100644
--- a/xen/drivers/passthrough/iommu.c
+++ b/xen/drivers/passthrough/iommu.c
@@ -729,6 +729,71 @@ int iommu_group_assign(struct pci_dev *pdev)
     return 0;
 }
 
+static struct iommu_group *iommu_group_lookup(uint16_t seg, uint8_t bus,
+                                              uint8_t devfn)
+{
+    unsigned int id = 0;
+    struct iommu_group *grp;
+
+    while ( radix_tree_gang_lookup(&iommu_groups, (void **)&grp, id, 1) )
+    {
+        struct pci_dev *pdev;
+
+        list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
+            if ( pdev->seg == seg && pdev->bus == bus &&
+                 pdev->devfn == devfn )
+                return grp;
+
+        id = grp->id + 1;
+    }
+
+    return NULL;
+}
+
+int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn,
+                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
+{
+    struct iommu_group *grp;
+    struct pci_dev *pdev;
+    int i = 0;
+
+    pcidevs_lock();
+
+    grp = iommu_group_lookup(seg, bus, devfn);
+    if ( !grp )
+    {
+        pcidevs_unlock();
+        return 0;
+    }
+
+    list_for_each_entry ( pdev, &grp->devs_list, grpdevs_list )
+    {
+        uint32_t sbdf;
+
+        if ( i >= max_sdevs )
+            break;
+
+        if ( pdev->domain != d )
+            continue;
+
+        sbdf = PCI_SBDF3(pdev->seg, pdev->bus, pdev->devfn);
+
+        if ( xsm_get_device_group(XSM_HOOK, sbdf) )
+            continue;
+
+        if ( unlikely(copy_to_guest_offset(buf, i, &sbdf, 1)) )
+        {
+            pcidevs_unlock();
+            return -1;
+        }
+        i++;
+    }
+
+    pcidevs_unlock();
+
+    return i;
+}
+
 #endif /* CONFIG_HAS_PCI */
 
 /*
diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
index 6210409741..68b2883ed6 100644
--- a/xen/drivers/passthrough/pci.c
+++ b/xen/drivers/passthrough/pci.c
@@ -1554,53 +1554,6 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
     return ret;
 }
 
-static int iommu_get_device_group(
-    struct domain *d, u16 seg, u8 bus, u8 devfn,
-    XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs)
-{
-    const struct domain_iommu *hd = dom_iommu(d);
-    struct pci_dev *pdev;
-    int group_id, sdev_id;
-    u32 bdf;
-    int i = 0;
-    const struct iommu_ops *ops = hd->platform_ops;
-
-    if ( !iommu_enabled || !ops || !ops->get_device_group_id )
-        return 0;
-
-    group_id = ops->get_device_group_id(seg, bus, devfn);
-
-    pcidevs_lock();
-    for_each_pdev( d, pdev )
-    {
-        if ( (pdev->seg != seg) ||
-             ((pdev->bus == bus) && (pdev->devfn == devfn)) )
-            continue;
-
-        if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 8) | pdev->devfn) )
-            continue;
-
-        sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
-        if ( (sdev_id == group_id) && (i < max_sdevs) )
-        {
-            bdf = 0;
-            bdf |= (pdev->bus & 0xff) << 16;
-            bdf |= (pdev->devfn & 0xff) << 8;
-
-            if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
-            {
-                pcidevs_unlock();
-                return -1;
-            }
-            i++;
-        }
-    }
-
-    pcidevs_unlock();
-
-    return i;
-}
-
 void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev)
 {
     pcidevs_lock();
diff --git a/xen/include/xen/iommu.h b/xen/include/xen/iommu.h
index 6d6aa12314..c4e1604bb8 100644
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -319,6 +319,8 @@ extern struct page_list_head iommu_pt_cleanup_list;
 
 void iommu_groups_init(void);
 int iommu_group_assign(struct pci_dev *pdev);
+int iommu_get_device_group(struct domain *d, u16 seg, u8 bus, u8 devfn,
+                           XEN_GUEST_HANDLE_64(uint32) buf, int max_sdevs);
 
 #endif /* CONFIG_HAS_PCI */
 
-- 
2.11.0
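
For reference, a minimal sketch of how a toolstack consumer could
exercise this domctl through libxc's existing xc_get_device_group()
wrapper. The domid and SBDF below are illustrative values only, and the
decoding assumes the PCI_SBDF3() layout used above (segment in bits
31:16, bus in bits 15:8, devfn in bits 7:0):

/*
 * Sketch: list the devices sharing an IOMMU group with 0000:03:00.0
 * as seen by domain 1. Illustrative values; not part of the patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <xenctrl.h>

#define MAX_SDEVS 16

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    uint32_t sdevs[MAX_SDEVS], num_sdevs = 0;
    uint32_t domid = 1;                             /* illustrative */
    uint32_t sbdf = (0 << 16) | (0x03 << 8) | 0x00; /* 0000:03:00.0 */
    uint32_t i;
    int rc;

    if ( !xch )
        return 1;

    rc = xc_get_device_group(xch, domid, sbdf, MAX_SDEVS,
                             &num_sdevs, sdevs);
    if ( !rc )
        for ( i = 0; i < num_sdevs; i++ )
            /* Decode seg:bus:slot.func from the returned SBDF. */
            printf("%04x:%02x:%02x.%u\n",
                   (unsigned int)(sdevs[i] >> 16),
                   (unsigned int)((sdevs[i] >> 8) & 0xff),
                   (unsigned int)((sdevs[i] >> 3) & 0x1f),
                   (unsigned int)(sdevs[i] & 0x7));

    xc_interface_close(xch);
    return rc ? 1 : 0;
}

Note that the new implementation reports group members as full SBDFs
generated by PCI_SBDF3(), whereas the implementation being removed
packed bus into bits 23:16 and devfn into bits 15:8 and omitted the
segment, so existing consumers of the returned array would see a
changed encoding.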