[Xen-changelog] [xen-unstable] AMD IOMMU: Enable HPET broadcast msi remapping
# HG changeset patch
# User Wei Wang <wei.wang2@xxxxxxx>
# Date 1353314806 -3600
# Node ID 9025a10c056174671c6c0d87816732a2ec6eaf0e
# Parent  321f8487379bd6ca157fe11b4b518436d8b31e7e
AMD IOMMU: Enable HPET broadcast msi remapping

This patch enables HPET MSI remapping for the AMD IOMMU.

Signed-off-by: Wei Wang <wei.wang2@xxxxxxx>

- use the existing ACPI_IVHD_* #define-s
- warn on finding more than one IVHD HPET entry
- consolidate parameters of update_intremap_entry_from_msi_msg()

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Committed-by: Jan Beulich <jbeulich@xxxxxxxx>
---

diff -r 321f8487379b -r 9025a10c0561 xen/drivers/passthrough/amd/iommu_acpi.c
--- a/xen/drivers/passthrough/amd/iommu_acpi.c	Thu Nov 15 10:25:29 2012 +0000
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c	Mon Nov 19 09:46:46 2012 +0100
@@ -653,9 +653,32 @@ static u16 __init parse_ivhd_device_spec
     }
 
     add_ivrs_mapping_entry(bdf, bdf, special->header.data_setting, iommu);
+
+    switch ( special->variety )
+    {
+    case ACPI_IVHD_IOAPIC:
     /* set device id of ioapic */
-    ioapic_sbdf[special->handle].bdf = bdf;
-    ioapic_sbdf[special->handle].seg = seg;
+        ioapic_sbdf[special->handle].bdf = bdf;
+        ioapic_sbdf[special->handle].seg = seg;
+        break;
+    case ACPI_IVHD_HPET:
+        /* set device id of hpet */
+        if ( hpet_sbdf.iommu )
+        {
+            printk(XENLOG_WARNING "Only one IVHD HPET entry is supported\n");
+            break;
+        }
+        hpet_sbdf.id = special->handle;
+        hpet_sbdf.bdf = bdf;
+        hpet_sbdf.seg = seg;
+        hpet_sbdf.iommu = iommu;
+        break;
+    default:
+        printk(XENLOG_ERR "Unrecognized IVHD special variety %#x\n",
+               special->variety);
+        return 0;
+    }
+
     return dev_length;
 }
 
diff -r 321f8487379b -r 9025a10c0561 xen/drivers/passthrough/amd/iommu_intr.c
--- a/xen/drivers/passthrough/amd/iommu_intr.c	Thu Nov 15 10:25:29 2012 +0000
+++ b/xen/drivers/passthrough/amd/iommu_intr.c	Mon Nov 19 09:46:46 2012 +0100
@@ -28,6 +28,7 @@
 #define INTREMAP_ENTRIES (1 << INTREMAP_LENGTH)
 
 struct ioapic_sbdf ioapic_sbdf[MAX_IO_APICS];
+struct hpet_sbdf hpet_sbdf;
 void *shared_intremap_table;
 static DEFINE_SPINLOCK(shared_intremap_lock);
 
@@ -262,19 +263,18 @@ void amd_iommu_ioapic_update_ire(
 }
 
 static void update_intremap_entry_from_msi_msg(
-    struct amd_iommu *iommu, struct pci_dev *pdev,
+    struct amd_iommu *iommu, u8 bdf,
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
     unsigned long flags;
     u32* entry;
-    u16 bdf, req_id, alias_id;
+    u16 req_id, alias_id;
     u8 delivery_mode, dest, vector, dest_mode;
     spinlock_t *lock;
     int offset;
 
-    bdf = PCI_BDF2(pdev->bus, pdev->devfn);
-    req_id = get_dma_requestor_id(pdev->seg, bdf);
-    alias_id = get_intremap_requestor_id(pdev->seg, bdf);
+    req_id = get_dma_requestor_id(iommu->seg, bdf);
+    alias_id = get_intremap_requestor_id(iommu->seg, bdf);
 
     if ( msg == NULL )
     {
@@ -284,7 +284,7 @@ static void update_intremap_entry_from_m
         spin_unlock_irqrestore(lock, flags);
 
         if ( ( req_id != alias_id ) &&
-             get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
+             get_ivrs_mappings(iommu->seg)[alias_id].intremap_table != NULL )
         {
             lock = get_intremap_lock(iommu->seg, alias_id);
             spin_lock_irqsave(lock, flags);
@@ -317,7 +317,7 @@ static void update_intremap_entry_from_m
 
     lock = get_intremap_lock(iommu->seg, alias_id);
     if ( ( req_id != alias_id ) &&
-         get_ivrs_mappings(pdev->seg)[alias_id].intremap_table != NULL )
+         get_ivrs_mappings(iommu->seg)[alias_id].intremap_table != NULL )
     {
         spin_lock_irqsave(lock, flags);
         entry = (u32*)get_intremap_entry(iommu->seg, alias_id, offset);
@@ -340,20 +340,23 @@ void amd_iommu_msi_msg_update_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg)
 {
     struct pci_dev *pdev = msi_desc->dev;
-    int bdf = PCI_BDF2(pdev->bus, pdev->devfn);
+    int bdf, seg;
     struct amd_iommu *iommu;
 
     if ( !iommu_intremap )
        return;
 
-    iommu = find_iommu_for_device(pdev->seg, bdf);
+    bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
+    seg = pdev ? pdev->seg : hpet_sbdf.seg;
+
+    iommu = find_iommu_for_device(seg, bdf);
     if ( !iommu )
     {
         AMD_IOMMU_DEBUG("Fail to find iommu for MSI device id = %#x\n", bdf);
         return;
     }
 
-    update_intremap_entry_from_msi_msg(iommu, pdev, msi_desc, msg);
+    update_intremap_entry_from_msi_msg(iommu, bdf, msi_desc, msg);
 }
 
 void amd_iommu_read_msi_from_ire(
@@ -383,3 +386,15 @@ void* __init amd_iommu_alloc_intremap_ta
     memset(tb, 0, PAGE_SIZE * (1UL << INTREMAP_TABLE_ORDER));
     return tb;
 }
+
+int __init amd_setup_hpet_msi(struct msi_desc *msi_desc)
+{
+    if ( (msi_desc->hpet_id != hpet_sbdf.id) ||
+         (hpet_sbdf.iommu == NULL) )
+    {
+        AMD_IOMMU_DEBUG("Fail to setup HPET MSI remapping\n");
+        return 1;
+    }
+
+    return 0;
+}
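To see the shape of the new dispatch in isolation: when an MSI descriptor has no PCI device attached, the requestor id now comes from the IVHD-reported HPET entry instead of being derived from a struct pci_dev. The following stand-alone sketch models that fallback; the types, the resolve_requestor() helper, and the sample addresses are simplified stand-ins for illustration, not Xen's definitions:

/* Stand-alone illustration of the pdev-vs-HPET dispatch above.
 * All types and values are simplified stand-ins, not Xen's own. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define PCI_BDF2(b, df) ((((b) & 0xff) << 8) | ((df) & 0xff))

struct pci_dev { uint16_t seg; uint8_t bus, devfn; };
struct hpet_sbdf { uint16_t bdf, seg, id; void *iommu; } hpet_sbdf;
struct msi_desc { struct pci_dev *dev; };

/* Mirrors the reworked lookup: a NULL pdev means the MSI belongs to
 * the HPET, so the requestor comes from the IVHD HPET entry. */
static void resolve_requestor(const struct msi_desc *msi_desc,
                              int *bdf, int *seg)
{
    const struct pci_dev *pdev = msi_desc->dev;

    *bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
    *seg = pdev ? pdev->seg : hpet_sbdf.seg;
}

int main(void)
{
    struct pci_dev nic = { .seg = 0, .bus = 0x03, .devfn = 0x00 };
    struct msi_desc pci_msi = { .dev = &nic };
    struct msi_desc hpet_msi = { .dev = NULL };
    int bdf, seg;

    /* Pretend the IVHD table reported the HPET behind 00:14.0. */
    hpet_sbdf.bdf = PCI_BDF2(0x00, (0x14 << 3) | 0);
    hpet_sbdf.seg = 0;

    resolve_requestor(&pci_msi, &bdf, &seg);
    printf("PCI MSI:  seg %04x bdf %04x\n", seg, bdf);
    resolve_requestor(&hpet_msi, &bdf, &seg);
    printf("HPET MSI: seg %04x bdf %04x\n", seg, bdf);
    return 0;
}

Keying update_intremap_entry_from_msi_msg() on (iommu, bdf) rather than on the pci_dev is what makes the HPET path possible in the first place: the HPET has no struct pci_dev to hand in.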
diff -r 321f8487379b -r 9025a10c0561 xen/drivers/passthrough/amd/pci_amd_iommu.c
--- a/xen/drivers/passthrough/amd/pci_amd_iommu.c	Thu Nov 15 10:25:29 2012 +0000
+++ b/xen/drivers/passthrough/amd/pci_amd_iommu.c	Mon Nov 19 09:46:46 2012 +0100
@@ -599,6 +599,7 @@ const struct iommu_ops amd_iommu_ops = {
     .update_ire_from_msi = amd_iommu_msi_msg_update_ire,
     .read_apic_from_ire = __io_apic_read,
     .read_msi_from_ire = amd_iommu_read_msi_from_ire,
+    .setup_hpet_msi = amd_setup_hpet_msi,
     .suspend = amd_iommu_suspend,
     .resume = amd_iommu_resume,
     .share_p2m = amd_iommu_share_p2m,
 
diff -r 321f8487379b -r 9025a10c0561 xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
--- a/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Thu Nov 15 10:25:29 2012 +0000
+++ b/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h	Mon Nov 19 09:46:46 2012 +0100
@@ -97,12 +97,18 @@ void amd_iommu_msi_msg_update_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg);
 void amd_iommu_read_msi_from_ire(
     struct msi_desc *msi_desc, struct msi_msg *msg);
+int amd_setup_hpet_msi(struct msi_desc *msi_desc);
 
 extern struct ioapic_sbdf {
     u16 bdf, seg;
 } ioapic_sbdf[MAX_IO_APICS];
 extern void *shared_intremap_table;
 
+extern struct hpet_sbdf {
+    u16 bdf, seg, id;
+    struct amd_iommu *iommu;
+} hpet_sbdf;
+
 /* power management support */
 void amd_iommu_resume(void);
 void amd_iommu_suspend(void);
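The other half of the change is bookkeeping: the IVHD parser records at most one HPET entry, and amd_setup_hpet_msi() later rejects any HPET whose id does not match what ACPI reported. A compressed, self-contained model of that handshake follows; the function names, the ACPI_IVHD_HPET value, and the sample ids are stand-ins chosen for illustration, not the Xen definitions:

/* Compressed model of the IVHD HPET bookkeeping added by this patch.
 * Constants and types are simplified stand-ins, not Xen's own. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define ACPI_IVHD_HPET 2

struct hpet_sbdf { uint16_t bdf, seg, id; const char *iommu; } hpet_sbdf;

/* Mirrors the switch added to the IVHD special-device parser: only
 * the first HPET entry wins; later ones just trigger a warning. */
static void record_special(uint8_t variety, uint8_t handle,
                           uint16_t seg, uint16_t bdf, const char *iommu)
{
    switch ( variety )
    {
    case ACPI_IVHD_HPET:
        if ( hpet_sbdf.iommu )
        {
            printf("warning: only one IVHD HPET entry is supported\n");
            break;
        }
        hpet_sbdf.id = handle;
        hpet_sbdf.bdf = bdf;
        hpet_sbdf.seg = seg;
        hpet_sbdf.iommu = iommu;
        break;
    }
}

/* Mirrors amd_setup_hpet_msi(): remapping is only set up when the
 * HPET id matches the IVHD entry and an IOMMU was recorded for it. */
static int setup_hpet_msi(unsigned int hpet_id)
{
    if ( hpet_id != hpet_sbdf.id || hpet_sbdf.iommu == NULL )
        return 1; /* fail: no usable remapping entry */
    return 0;
}

int main(void)
{
    record_special(ACPI_IVHD_HPET, 0, 0, 0x00a0, "iommu0");
    record_special(ACPI_IVHD_HPET, 1, 0, 0x00a8, "iommu1"); /* ignored */

    printf("hpet 0: %s\n", setup_hpet_msi(0) ? "fail" : "ok");
    printf("hpet 1: %s\n", setup_hpet_msi(1) ? "fail" : "ok");
    return 0;
}

The single-entry restriction matches the warning added in the parser: with one global hpet_sbdf there is room to describe exactly one HPET behind exactly one IOMMU, which is all the broadcast use case needs.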
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog