[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-changelog] [xen-4.0-testing] x2APIC/VT-d: improve interrupt remapping and queued invalidation enabling and disabling
# HG changeset patch # User Keir Fraser <keir.fraser@xxxxxxxxxx> # Date 1279186999 -3600 # Node ID a35e5f33a72eee3d00cec6972bb93585609559e2 # Parent 4ba86edf38f816a0d94cfb85b90074a72113e41c x2APIC/VT-d: improve interrupt remapping and queued invalidation enabling and disabling x2APIC depends on interrupt remapping, so interrupt remapping needs to be enabled before x2APIC. Usually x2APIC is not yet enabled (x2apic_enabled=0) at the time interrupt remapping is enabled, even though x2APIC will be enabled later. So a parameter is needed to select the interrupt mode in intremap_enable, instead of checking x2apic_enabled. This patch adds a parameter "eim" to intremap_enable to achieve that. Interrupt remapping and queued invalidation are already enabled when x2apic is enabled, so they need not be enabled again when setting up the iommu. This patch checks whether interrupt remapping and queued invalidation are already enabled, and does not enable them again if so. It does the same when disabling: it does not disable them if they are already disabled. 
Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx> xen-unstable changeset: 21717:176956d1d2fd xen-unstable date: Mon Jul 05 08:30:25 2010 +0100 --- xen/drivers/passthrough/vtd/extern.h | 2 - xen/drivers/passthrough/vtd/intremap.c | 28 ++++++++++++++++++++++--- xen/drivers/passthrough/vtd/iommu.c | 36 ++++++++++++++------------------- xen/drivers/passthrough/vtd/qinval.c | 19 +++++++++++++---- 4 files changed, 56 insertions(+), 29 deletions(-) diff -r 4ba86edf38f8 -r a35e5f33a72e xen/drivers/passthrough/vtd/extern.h --- a/xen/drivers/passthrough/vtd/extern.h Thu Jul 15 10:42:38 2010 +0100 +++ b/xen/drivers/passthrough/vtd/extern.h Thu Jul 15 10:43:19 2010 +0100 @@ -33,7 +33,7 @@ extern struct keyhandler dump_iommu_info int enable_qinval(struct iommu *iommu); void disable_qinval(struct iommu *iommu); -int enable_intremap(struct iommu *iommu); +int enable_intremap(struct iommu *iommu, int eim); void disable_intremap(struct iommu *iommu); int queue_invalidate_context(struct iommu *iommu, u16 did, u16 source_id, u8 function_mask, u8 granu); diff -r 4ba86edf38f8 -r a35e5f33a72e xen/drivers/passthrough/vtd/intremap.c --- a/xen/drivers/passthrough/vtd/intremap.c Thu Jul 15 10:42:38 2010 +0100 +++ b/xen/drivers/passthrough/vtd/intremap.c Thu Jul 15 10:43:19 2010 +0100 @@ -709,7 +709,7 @@ void msi_msg_write_remap_rte( } #endif -int enable_intremap(struct iommu *iommu) +int enable_intremap(struct iommu *iommu, int eim) { struct acpi_drhd_unit *drhd; struct ir_ctrl *ir_ctrl; @@ -719,10 +719,25 @@ int enable_intremap(struct iommu *iommu) ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap); ir_ctrl = iommu_ir_ctrl(iommu); + sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); + + /* Return if already enabled by Xen */ + if ( (sts & DMA_GSTS_IRES) && ir_ctrl->iremap_maddr ) + return 0; + + sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); + if ( !(sts & DMA_GSTS_QIES) ) + { + dprintk(XENLOG_ERR VTDPREFIX, + "Queued invalidation is not enabled, should not enable " + "interrupt 
remapping\n"); + return -EINVAL; + } + if ( ir_ctrl->iremap_maddr == 0 ) { drhd = iommu_to_drhd(iommu); - ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR ); + ir_ctrl->iremap_maddr = alloc_pgtable_maddr(drhd, IREMAP_ARCH_PAGE_NR); if ( ir_ctrl->iremap_maddr == 0 ) { dprintk(XENLOG_WARNING VTDPREFIX, @@ -735,7 +750,7 @@ int enable_intremap(struct iommu *iommu) #ifdef CONFIG_X86 /* set extended interrupt mode bit */ ir_ctrl->iremap_maddr |= - x2apic_enabled ? (1 << IRTA_REG_EIME_SHIFT) : 0; + eim ? (1 << IRTA_REG_EIME_SHIFT) : 0; #endif spin_lock_irqsave(&iommu->register_lock, flags); @@ -772,13 +787,18 @@ void disable_intremap(struct iommu *iomm u32 sts; unsigned long flags; - ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap); + if ( !ecap_intr_remap(iommu->ecap) ) + return; spin_lock_irqsave(&iommu->register_lock, flags); sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); + if ( !(sts & DMA_GSTS_IRES) ) + goto out; + dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE)); IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, !(sts & DMA_GSTS_IRES), sts); +out: spin_unlock_irqrestore(&iommu->register_lock, flags); } diff -r 4ba86edf38f8 -r a35e5f33a72e xen/drivers/passthrough/vtd/iommu.c --- a/xen/drivers/passthrough/vtd/iommu.c Thu Jul 15 10:42:38 2010 +0100 +++ b/xen/drivers/passthrough/vtd/iommu.c Thu Jul 15 10:43:19 2010 +0100 @@ -1829,24 +1829,20 @@ static int init_vtd_hw(void) spin_lock_irqsave(&iommu->register_lock, flags); dmar_writel(iommu->reg, DMAR_FECTL_REG, 0); spin_unlock_irqrestore(&iommu->register_lock, flags); - - /* initialize flush functions */ - flush = iommu_get_flush(iommu); - flush->context = flush_context_reg; - flush->iotlb = flush_iotlb_reg; - } - - if ( iommu_qinval ) - { - for_each_drhd_unit ( drhd ) - { - iommu = drhd->iommu; - if ( enable_qinval(iommu) != 0 ) - { - dprintk(XENLOG_INFO VTDPREFIX, - "Failed to enable Queued Invalidation!\n"); - break; - } + } + + for_each_drhd_unit ( drhd ) + { + iommu = drhd->iommu; 
+ /* + * If queued invalidation not enabled, use regiser based + * invalidation + */ + if ( enable_qinval(iommu) != 0 ) + { + flush = iommu_get_flush(iommu); + flush->context = flush_context_reg; + flush->iotlb = flush_iotlb_reg; } } @@ -1872,9 +1868,9 @@ static int init_vtd_hw(void) for_each_drhd_unit ( drhd ) { iommu = drhd->iommu; - if ( enable_intremap(iommu) != 0 ) + if ( enable_intremap(iommu, 0) != 0 ) { - dprintk(XENLOG_INFO VTDPREFIX, + dprintk(XENLOG_WARNING VTDPREFIX, "Failed to enable Interrupt Remapping!\n"); break; } diff -r 4ba86edf38f8 -r a35e5f33a72e xen/drivers/passthrough/vtd/qinval.c --- a/xen/drivers/passthrough/vtd/qinval.c Thu Jul 15 10:42:38 2010 +0100 +++ b/xen/drivers/passthrough/vtd/qinval.c Thu Jul 15 10:43:19 2010 +0100 @@ -437,10 +437,16 @@ int enable_qinval(struct iommu *iommu) u32 sts; unsigned long flags; + if ( !ecap_queued_inval(iommu->ecap) || !iommu_qinval ) + return -ENOENT; + qi_ctrl = iommu_qi_ctrl(iommu); flush = iommu_get_flush(iommu); - ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval); + /* Return if already enabled by Xen */ + sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); + if ( (sts & DMA_GSTS_QIES) && qi_ctrl->qinval_maddr ) + return 0; if ( qi_ctrl->qinval_maddr == 0 ) { @@ -488,14 +494,19 @@ void disable_qinval(struct iommu *iommu) u32 sts; unsigned long flags; - ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval); + if ( !ecap_queued_inval(iommu->ecap) ) + return; spin_lock_irqsave(&iommu->register_lock, flags); sts = dmar_readl(iommu->reg, DMAR_GSTS_REG); + if ( !(sts & DMA_GSTS_QIES) ) + goto out; + dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE)); /* Make sure hardware complete it */ IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl, !(sts & DMA_GSTS_QIES), sts); - spin_unlock_irqrestore(&iommu->register_lock, flags); -} +out: + spin_unlock_irqrestore(&iommu->register_lock, flags); +} _______________________________________________ Xen-changelog mailing list Xen-changelog@xxxxxxxxxxxxxxxxxxx 
http://lists.xensource.com/xen-changelog
|
Lists.xenproject.org is hosted with RackSpace, monitoring our |