[Xen-changelog] [xen-unstable] vtd: Clean up lock for VT-d register writes
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1245148280 -3600
# Node ID cc07094a02e491240d15944d6e70bedcaca8d541
# Parent  fa51db0871e1c4d8eca9913c6fc36c299be4e8d4
vtd: Clean up lock for VT-d register writes

Writes to VT-d registers must be performed while holding the register
lock, but some register writes are currently done without it. This
patch adds the missing register_lock coverage for those writes.

Signed-off-by: Weidong Han <weidong.han@xxxxxxxxx>
---
 xen/drivers/passthrough/vtd/intremap.c |   13 +++++++++++--
 xen/drivers/passthrough/vtd/iommu.c    |   32 +++++++++++++++++++++-----------
 xen/drivers/passthrough/vtd/qinval.c   |    9 ++++++++-
 xen/drivers/passthrough/vtd/utils.c    |    3 +++
 4 files changed, 43 insertions(+), 14 deletions(-)

diff -r fa51db0871e1 -r cc07094a02e4 xen/drivers/passthrough/vtd/intremap.c
--- a/xen/drivers/passthrough/vtd/intremap.c    Tue Jun 16 11:30:45 2009 +0100
+++ b/xen/drivers/passthrough/vtd/intremap.c    Tue Jun 16 11:31:20 2009 +0100
@@ -535,6 +535,7 @@ int enable_intremap(struct iommu *iommu)
 {
     struct ir_ctrl *ir_ctrl;
     u32 sts, gcmd;
+    unsigned long flags;
 
     ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
 
@@ -556,6 +557,8 @@ int enable_intremap(struct iommu *iommu)
         ir_ctrl->iremap_maddr |= ecap_ext_intr(iommu->ecap) ?
             (1 << IRTA_REG_EIME_SHIFT) : 0;
 #endif
+    spin_lock_irqsave(&iommu->register_lock, flags);
+
     /* set size of the interrupt remapping table */
     ir_ctrl->iremap_maddr |= IRTA_REG_TABLE_SIZE;
     dmar_writeq(iommu->reg, DMAR_IRTA_REG, ir_ctrl->iremap_maddr);
@@ -567,10 +570,12 @@ int enable_intremap(struct iommu *iommu)
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   (sts & DMA_GSTS_SIRTPS), sts);
-
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
+
     /* After set SIRTP, must globally invalidate the interrupt entry cache */
     iommu_flush_iec_global(iommu);
 
+    spin_lock_irqsave(&iommu->register_lock, flags);
     /* enable compatibility format interrupt pass through */
     gcmd |= DMA_GCMD_CFI;
     dmar_writel(iommu->reg, DMAR_GCMD_REG, gcmd);
 
@@ -584,6 +589,7 @@ int enable_intremap(struct iommu *iommu)
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   (sts & DMA_GSTS_IRES), sts);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     return init_apic_pin_2_ir_idx();
 }
@@ -591,12 +597,15 @@ void disable_intremap(struct iommu *iomm
 void disable_intremap(struct iommu *iommu)
 {
     u32 sts;
+    unsigned long flags;
 
     ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
 
+    spin_lock_irqsave(&iommu->register_lock, flags);
     sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
     dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_IRE));
 
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   !(sts & DMA_GSTS_IRES), sts);
-}
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
+}

diff -r fa51db0871e1 -r cc07094a02e4 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c       Tue Jun 16 11:30:45 2009 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c       Tue Jun 16 11:31:20 2009 +0100
@@ -229,12 +229,12 @@ static void iommu_flush_write_buffer(str
 static void iommu_flush_write_buffer(struct iommu *iommu)
 {
     u32 val;
-    unsigned long flag;
+    unsigned long flags;
 
     if ( !rwbf_quirk && !cap_rwbf(iommu->cap) )
         return;
 
-    spin_lock_irqsave(&iommu->register_lock, flag);
+    spin_lock_irqsave(&iommu->register_lock, flags);
     val = dmar_readl(iommu->reg, DMAR_GSTS_REG);
     dmar_writel(iommu->reg, DMAR_GCMD_REG, val | DMA_GCMD_WBF);
 
@@ -242,7 +242,7 @@ static void iommu_flush_write_buffer(str
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   !(val & DMA_GSTS_WBFS), val);
 
-    spin_unlock_irqrestore(&iommu->register_lock, flag);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 /* return value determine if we need a write buffer flush */
@@ -253,7 +253,7 @@ static int flush_context_reg(
 {
     struct iommu *iommu = (struct iommu *) _iommu;
     u64 val = 0;
-    unsigned long flag;
+    unsigned long flags;
 
     /*
      * In the non-present entry flush case, if hardware doesn't cache
@@ -287,14 +287,14 @@ static int flush_context_reg(
     }
     val |= DMA_CCMD_ICC;
 
-    spin_lock_irqsave(&iommu->register_lock, flag);
+    spin_lock_irqsave(&iommu->register_lock, flags);
     dmar_writeq(iommu->reg, DMAR_CCMD_REG, val);
 
     /* Make sure hardware complete it */
     IOMMU_WAIT_OP(iommu, DMAR_CCMD_REG, dmar_readq,
                   !(val & DMA_CCMD_ICC), val);
 
-    spin_unlock_irqrestore(&iommu->register_lock, flag);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     /* flush context entry will implicitly flush write buffer */
     return 0;
 }
@@ -333,7 +333,7 @@ static int flush_iotlb_reg(void *_iommu,
     struct iommu *iommu = (struct iommu *) _iommu;
     int tlb_offset = ecap_iotlb_offset(iommu->ecap);
     u64 val = 0, val_iva = 0;
-    unsigned long flag;
+    unsigned long flags;
 
     /*
      * In the non-present entry flush case, if hardware doesn't cache
@@ -373,7 +373,7 @@ static int flush_iotlb_reg(void *_iommu,
     if ( cap_write_drain(iommu->cap) )
         val |= DMA_TLB_WRITE_DRAIN;
 
-    spin_lock_irqsave(&iommu->register_lock, flag);
+    spin_lock_irqsave(&iommu->register_lock, flags);
     /* Note: Only uses first TLB reg currently */
     if ( val_iva )
         dmar_writeq(iommu->reg, tlb_offset, val_iva);
@@ -382,7 +382,7 @@ static int flush_iotlb_reg(void *_iommu,
     /* Make sure hardware complete it */
     IOMMU_WAIT_OP(iommu, (tlb_offset + 8), dmar_readq,
                   !(val & DMA_TLB_IVT), val);
-    spin_unlock_irqrestore(&iommu->register_lock, flag);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     /* check IOTLB invalidation granularity */
     if ( DMA_TLB_IAIG(val) == 0 )
@@ -590,10 +590,10 @@ static void iommu_enable_translation(str
     /* Make sure hardware complete it */
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   (sts & DMA_GSTS_TES), sts);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     /* Disable PMRs when VT-d engine takes effect per spec definition */
     disable_pmr(iommu);
-    spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static void iommu_disable_translation(struct iommu *iommu)
@@ -1617,7 +1617,9 @@ void clear_fault_bits(struct iommu *iomm
 void clear_fault_bits(struct iommu *iommu)
 {
     u64 val;
-
+    unsigned long flags;
+
+    spin_lock_irqsave(&iommu->register_lock, flags);
     val = dmar_readq(
         iommu->reg,
         cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+0x8);
@@ -1626,6 +1628,7 @@ void clear_fault_bits(struct iommu *iomm
         cap_fault_reg_offset(dmar_readq(iommu->reg,DMAR_CAP_REG))+8,
         val);
     dmar_writel(iommu->reg, DMAR_FSTS_REG, DMA_FSTS_FAULTS);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 }
 
 static int init_vtd_hw(void)
@@ -1635,6 +1638,7 @@ static int init_vtd_hw(void)
     struct iommu_flush *flush = NULL;
     int vector;
     int ret;
+    unsigned long flags;
 
     for_each_drhd_unit ( drhd )
     {
@@ -1652,7 +1656,10 @@ static int init_vtd_hw(void)
         dma_msi_data_init(iommu, iommu->vector);
         dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
         clear_fault_bits(iommu);
+
+        spin_lock_irqsave(&iommu->register_lock, flags);
         dmar_writel(iommu->reg, DMAR_FECTL_REG, 0);
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
 
         /* initialize flush functions */
         flush = iommu_get_flush(iommu);
@@ -1942,6 +1949,7 @@ void iommu_resume(void)
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     u32 i;
+    unsigned long flags;
 
     if ( !iommu_enabled )
         return;
@@ -1954,6 +1962,7 @@ void iommu_resume(void)
         iommu = drhd->iommu;
         i = iommu->index;
 
+        spin_lock_irqsave(&iommu->register_lock, flags);
         dmar_writel(iommu->reg, DMAR_FECTL_REG,
                     (u32) iommu_state[i][DMAR_FECTL_REG]);
         dmar_writel(iommu->reg, DMAR_FEDATA_REG,
@@ -1962,6 +1971,7 @@ void iommu_resume(void)
                     (u32) iommu_state[i][DMAR_FEADDR_REG]);
         dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
                     (u32) iommu_state[i][DMAR_FEUADDR_REG]);
+        spin_unlock_irqrestore(&iommu->register_lock, flags);
 
         iommu_enable_translation(iommu);
     }

diff -r fa51db0871e1 -r cc07094a02e4 xen/drivers/passthrough/vtd/qinval.c
--- a/xen/drivers/passthrough/vtd/qinval.c      Tue Jun 16 11:30:45 2009 +0100
+++ b/xen/drivers/passthrough/vtd/qinval.c      Tue Jun 16 11:31:20 2009 +0100
@@ -421,6 +421,7 @@ int enable_qinval(struct iommu *iommu)
     struct qi_ctrl *qi_ctrl;
     struct iommu_flush *flush;
     u32 sts;
+    unsigned long flags;
 
     qi_ctrl = iommu_qi_ctrl(iommu);
     flush = iommu_get_flush(iommu);
@@ -449,6 +450,8 @@ int enable_qinval(struct iommu *iommu)
      * to IQA register.
      */
     qi_ctrl->qinval_maddr |= IQA_REG_QS;
+
+    spin_lock_irqsave(&iommu->register_lock, flags);
     dmar_writeq(iommu->reg, DMAR_IQA_REG, qi_ctrl->qinval_maddr);
 
     dmar_writeq(iommu->reg, DMAR_IQT_REG, 0);
@@ -460,6 +463,7 @@ int enable_qinval(struct iommu *iommu)
     /* Make sure hardware complete it */
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   (sts & DMA_GSTS_QIES), sts);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     qinval_enabled = 1;
     return 0;
@@ -468,13 +472,16 @@ void disable_qinval(struct iommu *iommu)
 void disable_qinval(struct iommu *iommu)
 {
     u32 sts;
+    unsigned long flags;
 
     ASSERT(ecap_queued_inval(iommu->ecap) && iommu_qinval);
 
+    spin_lock_irqsave(&iommu->register_lock, flags);
     sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
     dmar_writel(iommu->reg, DMAR_GCMD_REG, sts & (~DMA_GCMD_QIE));
 
     /* Make sure hardware complete it */
     IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                   !(sts & DMA_GSTS_QIES), sts);
-}
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
+}

diff -r fa51db0871e1 -r cc07094a02e4 xen/drivers/passthrough/vtd/utils.c
--- a/xen/drivers/passthrough/vtd/utils.c       Tue Jun 16 11:30:45 2009 +0100
+++ b/xen/drivers/passthrough/vtd/utils.c       Tue Jun 16 11:31:20 2009 +0100
@@ -39,15 +39,18 @@ void disable_pmr(struct iommu *iommu)
 void disable_pmr(struct iommu *iommu)
 {
     u32 val;
+    unsigned long flags;
 
     val = dmar_readl(iommu->reg, DMAR_PMEN_REG);
     if ( !(val & DMA_PMEN_PRS) )
         return;
 
+    spin_lock_irqsave(&iommu->register_lock, flags);
     dmar_writel(iommu->reg, DMAR_PMEN_REG, val & ~DMA_PMEN_EPM);
 
     IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, dmar_readl,
                   !(val & DMA_PMEN_PRS), val);
+    spin_unlock_irqrestore(&iommu->register_lock, flags);
 
     dprintk(XENLOG_INFO VTDPREFIX,
             "Disabled protected memory registers\n");
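Every hunk in this changeset applies the same discipline: take register_lock
with the irqsave variant (VT-d fault handling can run in interrupt context),
program the register, spin on the corresponding status bit via
IOMMU_WAIT_OP(), then release. The following is a minimal sketch of that
pattern, reusing the struct iommu fields and dmar_readl()/dmar_writel()
accessors visible in the diff; gcmd_set_bit_locked() itself is a
hypothetical helper for illustration, not a function in the tree:

    /* Hypothetical helper: set a DMA_GCMD_* command bit and wait for the
     * matching DMA_GSTS_* status bit, all under register_lock. */
    static void gcmd_set_bit_locked(struct iommu *iommu, u32 cmd, u32 status)
    {
        u32 sts;
        unsigned long flags;

        /* register_lock serializes all MMIO writes to this IOMMU. */
        spin_lock_irqsave(&iommu->register_lock, flags);

        sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
        dmar_writel(iommu->reg, DMAR_GCMD_REG, sts | cmd);

        /* Poll GSTS until hardware acknowledges the command. */
        IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, dmar_readl,
                      (sts & status), sts);

        spin_unlock_irqrestore(&iommu->register_lock, flags);
    }

Note where the patch deliberately splits the critical section:
enable_intremap() drops the lock around iommu_flush_iec_global(), presumably
because the flush path can take the lock on its own, and
iommu_enable_translation() now unlocks before calling disable_pmr(), which
after this change acquires register_lock itself; the spinlock is not
recursive, so holding it across either call would deadlock.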
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog