[Xen-changelog] [xen-unstable] vtd: fix Dom0 S3 when VT-d is enabled.
# HG changeset patch
# User Keir Fraser <keir.fraser@xxxxxxxxxx>
# Date 1220022993 -3600
# Node ID c503269192f265c125a1d8d42375aaead84c5fca
# Parent 481f0dc6beb0b19cb02354dbe9b4ce068a5f6a18
vtd: fix Dom0 S3 when VT-d is enabled.

Currently, if VT-d is enabled, when Dom0 does S3, Xen does not
suspend/resume the IOMMU state.  The patch adds the missing invocations
of iommu_suspend()/iommu_resume() and makes some necessary fixes:
1) In iommu_set_root_entry(), we should not re-allocate the root entry
   when Xen returns from S3;
2) Define the array iommu_state correctly (u8 -> u32);
3) Only save/restore the necessary IOMMU registers.

The patch was tested on Weybridge.

NOTE: if there are HVM guests with assigned devices and we want to do
Dom0 S3, we MUST do HVM S3 for each such guest first.  Namely, the
steps are:
1) Use HVM S3 in the guest to suspend it (for example, in a Linux HVM
   guest, this is "echo mem > /sys/power/state");
2) Use Dom0 S3 to suspend Dom0: "echo mem > /sys/power/state";
3) At some point, Dom0 resumes from S3 (for example, by pressing the
   power button or using the ACPI alarm);
4) Use "xm trigger HVM_DOM_ID s3resume" to resume every HVM guest that
   was suspended previously.
A command-level sketch of these steps is given after the patch.

Signed-off-by: Dexuan Cui <dexuan.cui@xxxxxxxxx>
---
 xen/arch/x86/acpi/power.c           |    5 ++
 xen/drivers/passthrough/vtd/iommu.c |   70 ++++++++++++++----------------------
 xen/include/xen/iommu.h             |    4 ++
 3 files changed, 37 insertions(+), 42 deletions(-)

diff -r 481f0dc6beb0 -r c503269192f2 xen/arch/x86/acpi/power.c
--- a/xen/arch/x86/acpi/power.c Fri Aug 29 16:11:05 2008 +0100
+++ b/xen/arch/x86/acpi/power.c Fri Aug 29 16:16:33 2008 +0100
@@ -24,6 +24,7 @@
 #include <xen/sched.h>
 #include <xen/domain.h>
 #include <xen/console.h>
+#include <xen/iommu.h>
 #include <public/platform.h>
 #include <asm/tboot.h>
 
@@ -41,6 +42,8 @@ void do_suspend_lowlevel(void);
 
 static int device_power_down(void)
 {
+    iommu_suspend();
+
     console_suspend();
 
     time_suspend();
@@ -65,6 +68,8 @@ static void device_power_up(void)
     time_resume();
 
     console_resume();
+
+    iommu_resume();
 }
 
 static void freeze_domains(void)
diff -r 481f0dc6beb0 -r c503269192f2 xen/drivers/passthrough/vtd/iommu.c
--- a/xen/drivers/passthrough/vtd/iommu.c Fri Aug 29 16:11:05 2008 +0100
+++ b/xen/drivers/passthrough/vtd/iommu.c Fri Aug 29 16:16:33 2008 +0100
@@ -624,15 +624,10 @@ static int iommu_set_root_entry(struct i
     unsigned long flags;
     s_time_t start_time;
 
-    if ( iommu->root_maddr != 0 )
-    {
-        free_pgtable_maddr(iommu->root_maddr);
-        iommu->root_maddr = 0;
-    }
-
     spin_lock_irqsave(&iommu->register_lock, flags);
 
-    iommu->root_maddr = alloc_pgtable_maddr();
+    if ( iommu->root_maddr == 0 )
+        iommu->root_maddr = alloc_pgtable_maddr();
     if ( iommu->root_maddr == 0 )
     {
         spin_unlock_irqrestore(&iommu->register_lock, flags);
@@ -1864,37 +1859,31 @@ static int intel_iommu_group_id(u8 bus, 
     return -1;
 }
 
-u8 iommu_state[MAX_IOMMU_REGS * MAX_IOMMUS];
+static u32 iommu_state[MAX_IOMMUS][MAX_IOMMU_REGS];
 int iommu_suspend(void)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
-    int i = 0;
+    u32 i;
+
+    if ( !vtd_enabled )
+        return 0;
 
     iommu_flush_all();
 
     for_each_drhd_unit ( drhd )
     {
         iommu = drhd->iommu;
-        iommu_state[DMAR_RTADDR_REG * i] =
-            (u64) dmar_readq(iommu->reg, DMAR_RTADDR_REG);
-        iommu_state[DMAR_FECTL_REG * i] =
+        i = iommu->index;
+
+        iommu_state[i][DMAR_FECTL_REG] =
             (u32) dmar_readl(iommu->reg, DMAR_FECTL_REG);
-        iommu_state[DMAR_FEDATA_REG * i] =
+        iommu_state[i][DMAR_FEDATA_REG] =
             (u32) dmar_readl(iommu->reg, DMAR_FEDATA_REG);
-        iommu_state[DMAR_FEADDR_REG * i] =
+        iommu_state[i][DMAR_FEADDR_REG] =
            (u32) dmar_readl(iommu->reg, DMAR_FEADDR_REG);
-        iommu_state[DMAR_FEUADDR_REG * i] =
+        iommu_state[i][DMAR_FEUADDR_REG] =
             (u32) dmar_readl(iommu->reg, DMAR_FEUADDR_REG);
-        iommu_state[DMAR_PLMBASE_REG * i] =
-            (u32) dmar_readl(iommu->reg, DMAR_PLMBASE_REG);
-        iommu_state[DMAR_PLMLIMIT_REG * i] =
-            (u32) dmar_readl(iommu->reg, DMAR_PLMLIMIT_REG);
-        iommu_state[DMAR_PHMBASE_REG * i] =
-            (u64) dmar_readq(iommu->reg, DMAR_PHMBASE_REG);
-        iommu_state[DMAR_PHMLIMIT_REG * i] =
-            (u64) dmar_readq(iommu->reg, DMAR_PHMLIMIT_REG);
-        i++;
     }
 
     return 0;
@@ -1904,37 +1893,34 @@ int iommu_resume(void)
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
-    int i = 0;
+    u32 i;
+
+    if ( !vtd_enabled )
+        return 0;
 
     iommu_flush_all();
 
-    init_vtd_hw();
+    if ( init_vtd_hw() != 0 && force_iommu )
+        panic("IOMMU setup failed, crash Xen for security purpose!\n");
+
    for_each_drhd_unit ( drhd )
    {
        iommu = drhd->iommu;
-        dmar_writeq( iommu->reg, DMAR_RTADDR_REG,
-                    (u64) iommu_state[DMAR_RTADDR_REG * i]);
+        i = iommu->index;
+
         dmar_writel(iommu->reg, DMAR_FECTL_REG,
-                    (u32) iommu_state[DMAR_FECTL_REG * i]);
+                    (u32) iommu_state[i][DMAR_FECTL_REG]);
         dmar_writel(iommu->reg, DMAR_FEDATA_REG,
-                    (u32) iommu_state[DMAR_FEDATA_REG * i]);
+                    (u32) iommu_state[i][DMAR_FEDATA_REG]);
         dmar_writel(iommu->reg, DMAR_FEADDR_REG,
-                    (u32) iommu_state[DMAR_FEADDR_REG * i]);
+                    (u32) iommu_state[i][DMAR_FEADDR_REG]);
         dmar_writel(iommu->reg, DMAR_FEUADDR_REG,
-                    (u32) iommu_state[DMAR_FEUADDR_REG * i]);
-        dmar_writel(iommu->reg, DMAR_PLMBASE_REG,
-                    (u32) iommu_state[DMAR_PLMBASE_REG * i]);
-        dmar_writel(iommu->reg, DMAR_PLMLIMIT_REG,
-                    (u32) iommu_state[DMAR_PLMLIMIT_REG * i]);
-        dmar_writeq(iommu->reg, DMAR_PHMBASE_REG,
-                    (u64) iommu_state[DMAR_PHMBASE_REG * i]);
-        dmar_writeq(iommu->reg, DMAR_PHMLIMIT_REG,
-                    (u64) iommu_state[DMAR_PHMLIMIT_REG * i]);
+                    (u32) iommu_state[i][DMAR_FEUADDR_REG]);
         if ( iommu_enable_translation(iommu) )
             return -EIO;
-        i++;
-    }
+    }
+
     return 0;
 }
diff -r 481f0dc6beb0 -r c503269192f2 xen/include/xen/iommu.h
--- a/xen/include/xen/iommu.h Fri Aug 29 16:11:05 2008 +0100
+++ b/xen/include/xen/iommu.h Fri Aug 29 16:16:33 2008 +0100
@@ -109,4 +109,8 @@ struct iommu_ops {
 void iommu_update_ire_from_apic(unsigned int apic, unsigned int reg, unsigned int value);
 void iommu_update_ire_from_msi(struct msi_desc *msi_desc, struct msi_msg *msg);
+
+int iommu_suspend(void);
+int iommu_resume(void);
+
 #endif /* _IOMMU_H_ */
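
For convenience, the sequence described in the NOTE above can be written
out as the following command-level sketch.  This is not part of the patch
itself; the commands are the ones quoted in the changeset description, and
HVM_DOM_ID stands for the domain ID of the HVM guest being resumed:

    # 1) Inside each Linux HVM guest that has an assigned device:
    #    suspend the guest via HVM S3.
    echo mem > /sys/power/state

    # 2) In Dom0: suspend the whole platform via Dom0 S3.
    echo mem > /sys/power/state

    # 3) Dom0 resumes from S3 at some point (power button, ACPI alarm, ...).

    # 4) In Dom0, after resume: wake up every guest suspended in step 1.
    xm trigger HVM_DOM_ID s3resume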