[Xen-changelog] [xen-unstable] merge with xen-unstable.hg
# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1191268777 21600
# Node ID f71b7d6ad5d84bc67859bf48b6377ba4f40dcd21
# Parent  d3665dc74a414aea75132dfc585965310b8be05a
# Parent  5c7afb32df999ceca475f6582e3df3d7ff42d2fe
merge with xen-unstable.hg
---
 tools/python/xen/xm/main.py            |   29 +++++
 xen/arch/x86/hvm/hvm.c                 |   73 ++++++++++++++
 xen/arch/x86/hvm/svm/svm.c             |   96 ++----------------
 xen/arch/x86/hvm/vmx/vmx.c             |   61 +-----------
 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c |  167 ++++++++++++++++++++-------------
 xen/arch/x86/mm/paging.c               |   16 +--
 xen/include/asm-x86/hvm/hvm.h          |    3
 xen/include/asm-x86/hvm/support.h      |    1
 xen/include/xsm/acm/acm_core.h         |    2
 xen/include/xsm/acm/acm_hooks.h        |    4
 xen/xsm/acm/acm_core.c                 |   10 +
 11 files changed, 247 insertions(+), 215 deletions(-)

diff -r d3665dc74a41 -r f71b7d6ad5d8 tools/python/xen/xm/main.py
--- a/tools/python/xen/xm/main.py       Mon Oct 01 09:59:24 2007 -0600
+++ b/tools/python/xen/xm/main.py       Mon Oct 01 13:59:37 2007 -0600
@@ -55,6 +55,9 @@ from xen.util.acmpolicy import ACM_LABEL
 import XenAPI

+import inspect
+from xen.xend import XendOptions
+xoptions = XendOptions.instance()

 # getopt.gnu_getopt is better, but only exists in Python 2.3+.  Use
 # getopt.getopt if gnu_getopt is not available.  This will mean that options
@@ -1595,7 +1598,31 @@ def xm_sched_credit(args):
             err(str(result))

 def xm_info(args):
-    arg_check(args, "info", 0)
+    arg_check(args, "info", 0, 1)
+
+    try:
+        (options, params) = getopt.gnu_getopt(args, 'c', ['config'])
+    except getopt.GetoptError, opterr:
+        err(opterr)
+        usage('info')
+
+    show_xend_config = 0
+    for (k, v) in options:
+        if k in ['-c', '--config']:
+            show_xend_config = 1
+
+    if show_xend_config:
+        for name, obj in inspect.getmembers(xoptions):
+            if not inspect.ismethod(obj):
+                if name == "config":
+                    for x in obj[1:]:
+                        if len(x) < 2:
+                            print "%-38s: (none)" % x[0]
+                        else:
+                            print "%-38s:" % x[0], x[1]
+                else:
+                    print "%-38s:" % name, obj
+        return

     if serverType == SERVER_XEN_API:
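The new -c/--config switch makes `xm info' dump xend's option table instead of the usual host summary. A plausible session, matching the "%-38s:" format strings above; the option names and values shown are illustrative, not taken from a real xend configuration:

    # xm info -c
    xend-http-server                      : False
    xend-port                             : 8000
    xend-relocation-server                : False

Config entries that carry no value are printed as "(none)" by the len(x) < 2 branch.

diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/arch/x86/hvm/hvm.c    Mon Oct 01 13:59:37 2007 -0600
@@ -49,6 +49,10 @@
 #include <public/version.h>
 #include <public/memory.h>

+/* Xen command-line option to disable hardware-assisted paging */
+static int opt_hap_disabled;
+invbool_param("hap", opt_hap_disabled);
+
 int hvm_enabled __read_mostly;

 unsigned int opt_hvm_debug_level __read_mostly;
@@ -74,6 +78,14 @@ void hvm_enable(struct hvm_function_tabl

     hvm_funcs = *fns;
     hvm_enabled = 1;
+
+    if ( hvm_funcs.hap_supported )
+    {
+        if ( opt_hap_disabled )
+            hvm_funcs.hap_supported = 0;
+        printk("HVM: Hardware Assisted Paging %sabled\n",
+               hvm_funcs.hap_supported ? "en" : "dis");
+    }
 }

 void hvm_set_guest_time(struct vcpu *v, u64 gtime)
@@ -325,6 +337,34 @@ static int hvm_load_cpu_ctxt(struct doma
     if ( hvm_load_entry(CPU, h, &ctxt) != 0 )
         return -EINVAL;

+    /* Sanity check some control registers. */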
+    if ( (ctxt.cr0 & HVM_CR0_GUEST_RESERVED_BITS) ||
+         !(ctxt.cr0 & X86_CR0_ET) ||
+         ((ctxt.cr0 & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG) )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: bad CR0 0x%"PRIx64"\n",
+                 ctxt.msr_efer);
+        return -EINVAL;
+    }
+
+    if ( ctxt.cr4 & HVM_CR4_GUEST_RESERVED_BITS )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: bad CR4 0x%"PRIx64"\n",
+                 ctxt.msr_efer);
+        return -EINVAL;
+    }
+
+    if ( (ctxt.msr_efer & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+         ((sizeof(long) != 8) && (ctxt.msr_efer & EFER_LME)) ||
+         (!cpu_has_nx && (ctxt.msr_efer & EFER_NX)) ||
+         (!cpu_has_syscall && (ctxt.msr_efer & EFER_SCE)) ||
+         ((ctxt.msr_efer & (EFER_LME|EFER_LMA)) == EFER_LMA) )
+    {
+        gdprintk(XENLOG_ERR, "HVM restore: bad EFER 0x%"PRIx64"\n",
+                 ctxt.msr_efer);
+        return -EINVAL;
+    }
+
     /* Architecture-specific vmcs/vmcb bits */
     if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
         return -EINVAL;
@@ -518,6 +558,39 @@ void hvm_triple_fault(void)
     gdprintk(XENLOG_INFO, "Triple fault on VCPU%d - "
              "invoking HVM system reset.\n", v->vcpu_id);
     domain_shutdown(v->domain, SHUTDOWN_reboot);
+}
+
+int hvm_set_efer(uint64_t value)
+{
+    struct vcpu *v = current;
+
+    value &= ~EFER_LMA;
+
+    if ( (value & ~(EFER_LME | EFER_NX | EFER_SCE)) ||
+         ((sizeof(long) != 8) && (value & EFER_LME)) ||
+         (!cpu_has_nx && (value & EFER_NX)) ||
+         (!cpu_has_syscall && (value & EFER_SCE)) )
+    {
+        gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
+                 "EFER: %"PRIx64"\n", value);
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 0;
+    }
+
+    if ( ((value ^ v->arch.hvm_vcpu.guest_efer) & EFER_LME) &&
+         hvm_paging_enabled(v) )
+    {
+        gdprintk(XENLOG_WARNING,
+                 "Trying to change EFER.LME with paging enabled\n");
+        hvm_inject_exception(TRAP_gp_fault, 0, 0);
+        return 0;
+    }
+
+    value |= v->arch.hvm_vcpu.guest_efer & EFER_LMA;
+    v->arch.hvm_vcpu.guest_efer = value;
+    hvm_update_guest_efer(v);
+
+    return 1;
 }

 int hvm_set_cr0(unsigned long value)
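The control-register checks above are now centralised: the restore path and hvm_set_efer() apply the same EFER rules (no reserved bits, no LME on a 32-bit hypervisor, no NX/SCE unless the CPU offers them), and hvm_set_efer() additionally strips EFER_LMA, which only Xen manages, before validating. A minimal compilable sketch of the shared reserved-bit test; the bit positions are the architectural ones, and cpu_has_nx/cpu_has_syscall are modelled here as plain parameters rather than Xen's cpufeature tests:

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_SCE (1ULL << 0)   /* SYSCALL/SYSRET enable */
    #define EFER_LME (1ULL << 8)   /* long mode enable */
    #define EFER_LMA (1ULL << 10)  /* long mode active (Xen-managed) */
    #define EFER_NX  (1ULL << 11)  /* no-execute enable */

    /* Mirror of the reserved-bit portion of hvm_set_efer(): returns 1 if
     * 'value' only uses bits the guest may set on this CPU. */
    static int efer_value_acceptable(uint64_t value, int cpu_has_nx,
                                     int cpu_has_syscall)
    {
        if ( value & ~(EFER_LME | EFER_NX | EFER_SCE) )
            return 0;                    /* reserved or unhandled bit */
        if ( !cpu_has_nx && (value & EFER_NX) )
            return 0;                    /* CPU lacks NX */
        if ( !cpu_has_syscall && (value & EFER_SCE) )
            return 0;                    /* CPU lacks SYSCALL/SYSRET */
        return 1;
    }

    int main(void)
    {
        printf("%d\n", efer_value_acceptable(EFER_NX | EFER_SCE, 1, 1)); /* 1 */
        printf("%d\n", efer_value_acceptable(EFER_LMA, 1, 1));           /* 0 */
        return 0;
    }

EFER_LMA is rejected by this test only because hvm_set_efer() masks it off before the test ever sees it; the restore path instead allows LMA but insists that LME accompany it.

diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/arch/x86/hvm/svm/svm.c        Mon Oct 01 13:59:37 2007 -0600
@@ -69,8 +69,7 @@ static void *hsa[NR_CPUS] __read_mostly;
 /* vmcb used for extended host state */
 static void *root_vmcb[NR_CPUS] __read_mostly;

-/* hardware assisted paging bits */
-extern int opt_hap_enabled;
+static void svm_update_guest_efer(struct vcpu *v);

 static void inline __update_guest_eip(
     struct cpu_user_regs *regs, int inst_len)
@@ -106,22 +105,10 @@ static void svm_cpu_down(void)
     write_efer(read_efer() & ~EFER_SVME);
 }

-static int svm_lme_is_set(struct vcpu *v)
-{
-#ifdef __x86_64__
-    u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
-    return guest_efer & EFER_LME;
-#else
-    return 0;
-#endif
-}
-
 static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
     u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
     u32 ecx = regs->ecx;
-    struct vcpu *v = current;
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;

     HVM_DBG_LOG(DBG_LEVEL_0, "msr %x msr_content %"PRIx64,
                 ecx, msr_content);
@@ -129,47 +116,8 @@ static enum handler_return long_mode_do_
     switch ( ecx )
     {
     case MSR_EFER:
-        /* Offending reserved bit will cause #GP. */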
-#ifdef __x86_64__
-        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
-#else
-        if ( (msr_content & ~(EFER_NX | EFER_SCE)) ||
-#endif
-             (!cpu_has_nx && (msr_content & EFER_NX)) ||
-             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
-        {
-            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
-                     "EFER: %"PRIx64"\n", msr_content);
-            goto gp_fault;
-        }
-
-        if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
-        {
-            /* EFER.LME transition from 0 to 1. */
-            if ( hvm_paging_enabled(v) ||
-                 !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
-            {
-                gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
-                         "in paging mode or PAE bit is not set\n");
-                goto gp_fault;
-            }
-        }
-        else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
-        {
-            /* EFER.LME transistion from 1 to 0. */
-            if ( hvm_paging_enabled(v) )
-            {
-                gdprintk(XENLOG_WARNING,
-                         "Trying to clear EFER.LME while paging enabled\n");
-                goto gp_fault;
-            }
-        }
-
-        v->arch.hvm_vcpu.guest_efer = msr_content;
-        vmcb->efer = msr_content | EFER_SVME;
-        if ( !hvm_paging_enabled(v) )
-            vmcb->efer &= ~(EFER_LME | EFER_LMA);
-
+        if ( !hvm_set_efer(msr_content) )
+            return HNDL_exception_raised;
         break;

     case MSR_K8_MC4_MISC: /* Threshold register */
@@ -185,10 +133,6 @@ static enum handler_return long_mode_do_
     }

     return HNDL_done;
-
- gp_fault:
-    svm_inject_exception(v, TRAP_gp_fault, 1, 0);
-    return HNDL_exception_raised;
 }
@@ -452,11 +396,7 @@ static void svm_load_cpu_state(struct vc
     vmcb->cstar = data->msr_cstar;
     vmcb->sfmask = data->msr_syscall_mask;
     v->arch.hvm_vcpu.guest_efer = data->msr_efer;
-    vmcb->efer = data->msr_efer | EFER_SVME;
-    /* VMCB's EFER.LME isn't set unless we're actually in long mode
-     * (see long_mode_do_msr_write()) */
-    if ( !(vmcb->efer & EFER_LMA) )
-        vmcb->efer &= ~EFER_LME;
+    svm_update_guest_efer(v);

     hvm_set_guest_time(v, data->tsc);
 }
@@ -546,14 +486,11 @@ static void svm_update_guest_cr(struct v

 static void svm_update_guest_efer(struct vcpu *v)
 {
-#ifdef __x86_64__
-    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
-
-    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
-        vmcb->efer |= EFER_LME | EFER_LMA;
-    else
-        vmcb->efer &= ~(EFER_LME | EFER_LMA);
-#endif
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+
+    vmcb->efer = (v->arch.hvm_vcpu.guest_efer | EFER_SVME) & ~EFER_LME;
+    if ( vmcb->efer & EFER_LMA )
+        vmcb->efer |= EFER_LME;
 }

 static void svm_flush_guest_tlbs(void)
@@ -936,18 +873,14 @@ static struct hvm_function_table svm_fun
     .event_pending = svm_event_pending
 };

-static void svm_npt_detect(void)
+static int svm_npt_detect(void)
 {
     u32 eax, ebx, ecx, edx;

     /* Check CPUID for nested paging support. */
     cpuid(0x8000000A, &eax, &ebx, &ecx, &edx);

-    if ( !(edx & 1) && opt_hap_enabled )
-    {
-        printk("SVM: Nested paging is not supported by this CPU.\n");
-        opt_hap_enabled = 0;
-    }
+    return (edx & 1);
 }

 int start_svm(struct cpuinfo_x86 *c)
@@ -978,8 +911,6 @@ int start_svm(struct cpuinfo_x86 *c)

     write_efer(read_efer() | EFER_SVME);

-    svm_npt_detect();
-
     /* Initialize the HSA for this core. */
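The rewritten svm_update_guest_efer() compresses the VMCB bookkeeping into one rule: the VMCB's EFER is the guest's value with SVME forced on and LME suppressed until the guest is actually in long mode (LMA set). A small self-contained sketch of that derivation, using the architectural bit positions:

    #include <assert.h>
    #include <stdint.h>

    #define EFER_LME  (1ULL << 8)
    #define EFER_LMA  (1ULL << 10)
    #define EFER_SVME (1ULL << 12)

    /* Same derivation as the new svm_update_guest_efer(). */
    static uint64_t vmcb_efer_from_guest(uint64_t guest_efer)
    {
        uint64_t efer = (guest_efer | EFER_SVME) & ~EFER_LME;
        if ( efer & EFER_LMA )
            efer |= EFER_LME;
        return efer;
    }

    int main(void)
    {
        /* LME requested but paging not yet enabled: the VMCB hides LME. */
        assert(vmcb_efer_from_guest(EFER_LME) == EFER_SVME);
        /* Fully in long mode: LME and LMA are both reflected. */
        assert(vmcb_efer_from_guest(EFER_LME | EFER_LMA) ==
               (EFER_SVME | EFER_LME | EFER_LMA));
        return 0;
    }

This works because hvm_set_efer() has already guaranteed that guest_efer never carries LMA without LME.

diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Mon Oct 01 13:59:37 2007 -0600
@@ -199,42 +199,8 @@ static enum handler_return long_mode_do_
     switch ( ecx )
     {
     case MSR_EFER:
-        /* offending reserved bit will cause #GP */
-        if ( (msr_content & ~(EFER_LME | EFER_LMA | EFER_NX | EFER_SCE)) ||
-             (!cpu_has_nx && (msr_content & EFER_NX)) ||
-             (!cpu_has_syscall && (msr_content & EFER_SCE)) )
-        {
-            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
-                     "EFER: %"PRIx64"\n", msr_content);
-            goto gp_fault;
-        }
-
-        if ( (msr_content & EFER_LME)
-             && !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
-        {
-            if ( unlikely(hvm_paging_enabled(v)) )
-            {
-                gdprintk(XENLOG_WARNING,
-                         "Trying to set EFER.LME with paging enabled\n");
-                goto gp_fault;
-            }
-        }
-        else if ( !(msr_content & EFER_LME)
-                  && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
-        {
-            if ( unlikely(hvm_paging_enabled(v)) )
-            {
-                gdprintk(XENLOG_WARNING,
-                         "Trying to clear EFER.LME with paging enabled\n");
-                goto gp_fault;
-            }
-        }
-
-        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
-            write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
-                       (msr_content & (EFER_NX|EFER_SCE)));
-
-        v->arch.hvm_vcpu.guest_efer = msr_content;
+        if ( !hvm_set_efer(msr_content) )
+            goto exception_raised;
         break;

     case MSR_FS_BASE:
@@ -285,6 +251,7 @@ static enum handler_return long_mode_do_
     HVM_DBG_LOG(DBG_LEVEL_0, "Not cano address of msr write %x", ecx);
  gp_fault:
     vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+ exception_raised:
     return HNDL_exception_raised;
 }

@@ -380,7 +347,8 @@ static enum handler_return long_mode_do_
     u64 msr_content = 0;
     struct vcpu *v = current;

-    switch ( regs->ecx ) {
+    switch ( regs->ecx )
+    {
     case MSR_EFER:
         msr_content = v->arch.hvm_vcpu.guest_efer;
         break;
@@ -398,25 +366,12 @@ static enum handler_return long_mode_do_
 static enum handler_return long_mode_do_msr_write(struct cpu_user_regs *regs)
 {
     u64 msr_content = regs->eax | ((u64)regs->edx << 32);
-    struct vcpu *v = current;

     switch ( regs->ecx )
     {
     case MSR_EFER:
-        /* offending reserved bit will cause #GP */
-        if ( (msr_content & ~EFER_NX) ||
-             (!cpu_has_nx && (msr_content & EFER_NX)) )
-        {
-            gdprintk(XENLOG_WARNING, "Trying to set reserved bit in "
-                     "EFER: %"PRIx64"\n", msr_content);
-            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
+        if ( !hvm_set_efer(msr_content) )
             return HNDL_exception_raised;
-        }
-
-        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
-            write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
-
-        v->arch.hvm_vcpu.guest_efer = msr_content;
         break;

     default:
@@ -1096,6 +1051,10 @@ static void vmx_update_guest_efer(struct

     vmx_vmcs_exit(v);
 #endif
+
+    if ( v == current )
+        write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
+                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX|EFER_SCE)));
 }

 static void vmx_flush_guest_tlbs(void)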
     phys_hsa = (u64) virt_to_maddr(hsa[cpu]);
     phys_hsa_lo = (u32) phys_hsa;
@@ -994,11 +925,10 @@ int start_svm(struct cpuinfo_x86 *c)

     setup_vmcb_dump();

+    svm_function_table.hap_supported = svm_npt_detect();
+
     hvm_enable(&svm_function_table);

-    if ( opt_hap_enabled )
-        printk("SVM: Nested paging enabled.\n");
-
     return 1;
 }
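On the VMX side the guest's EFER.NX and EFER.SCE choices now reach the real MSR through vmx_update_guest_efer(), and only when the vcpu is currently running, instead of being written directly from the MSR handler. The update itself is a two-mask blend; a sketch, with the read_efer()/write_efer() pair modelled as a plain value:

    #include <assert.h>
    #include <stdint.h>

    #define EFER_SCE (1ULL << 0)
    #define EFER_NX  (1ULL << 11)

    /* Blend the guest's NX/SCE bits into the host EFER image, as the
     * new tail of vmx_update_guest_efer() does. */
    static uint64_t blend_nx_sce(uint64_t host_efer, uint64_t guest_efer)
    {
        const uint64_t mask = EFER_NX | EFER_SCE;
        return (host_efer & ~mask) | (guest_efer & mask);
    }

    int main(void)
    {
        /* Host had SCE on, guest wants NX only: the result carries the
         * guest's NX/SCE and keeps every other host bit. */
        assert(blend_nx_sce(EFER_SCE, EFER_NX) == EFER_NX);
        return 0;
    }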
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/arch/x86/hvm/vmx/vtd/intel-iommu.c
--- a/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/arch/x86/hvm/vmx/vtd/intel-iommu.c    Mon Oct 01 13:59:37 2007 -0600
@@ -134,7 +134,7 @@ static int device_context_mapped(struct
 #define level_mask(l) (((u64)(-1)) << level_to_offset_bits(l))
 #define level_size(l) (1 << level_to_offset_bits(l))
 #define align_to_level(addr, l) ((addr + level_size(l) - 1) & level_mask(l))

-static struct dma_pte *addr_to_dma_pte(struct domain *domain, u64 addr)
+static struct page_info *addr_to_dma_page(struct domain *domain, u64 addr)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct acpi_drhd_unit *drhd;
@@ -144,6 +144,8 @@ static struct dma_pte *addr_to_dma_pte(s
     int level = agaw_to_level(hd->agaw);
     int offset;
     unsigned long flags;
+    struct page_info *pg = NULL;
+    u64 *vaddr = NULL;

     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -153,79 +155,105 @@ static struct dma_pte *addr_to_dma_pte(s
     if ( !hd->pgd )
     {
         pgd = (struct dma_pte *)alloc_xenheap_page();
-        if ( !pgd && !hd->pgd )
+        if ( !pgd )
         {
             spin_unlock_irqrestore(&hd->mapping_lock, flags);
             return NULL;
         }
-        memset((u8*)pgd, 0, PAGE_SIZE);
-        if ( !hd->pgd )
-            hd->pgd = pgd;
-        else /* somebody is fast */
-            free_xenheap_page((void *) pgd);
-    }
+        memset(pgd, 0, PAGE_SIZE);
+        hd->pgd = pgd;
+    }
+
     parent = hd->pgd;
-    while ( level > 0 )
-    {
-        u8 *tmp;
+    while ( level > 1 )
+    {
         offset = address_level_offset(addr, level);
         pte = &parent[offset];
-        if ( level == 1 )
-            break;
+
         if ( dma_pte_addr(*pte) == 0 )
         {
-            tmp = alloc_xenheap_page();
-            memset(tmp, 0, PAGE_SIZE);
-            iommu_flush_cache_page(iommu, tmp);
-
-            if ( !tmp && dma_pte_addr(*pte) == 0 )
+            pg = alloc_domheap_page(NULL);
+            vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
+            if ( !vaddr )
             {
                 spin_unlock_irqrestore(&hd->mapping_lock, flags);
                 return NULL;
             }
-            if ( dma_pte_addr(*pte) == 0 )
+            memset(vaddr, 0, PAGE_SIZE);
+            iommu_flush_cache_page(iommu, vaddr);
+
+            dma_set_pte_addr(*pte, page_to_maddr(pg));
+
+            /*
+             * high level table always sets r/w, last level
+             * page table control read/write
+             */
+            dma_set_pte_readable(*pte);
+            dma_set_pte_writable(*pte);
+            iommu_flush_cache_entry(iommu, pte);
+        }
+        else
+        {
+            pg = maddr_to_page(pte->val);
+            vaddr = map_domain_page(mfn_x(page_to_mfn(pg)));
+            if ( !vaddr )
             {
-                dma_set_pte_addr(*pte,
-                                 virt_to_maddr(tmp));
-                /*
-                 * high level table always sets r/w, last level
-                 * page table control read/write
-                 */
-                dma_set_pte_readable(*pte);
-                dma_set_pte_writable(*pte);
-                iommu_flush_cache_entry(iommu, pte);
-            } else /* somebody is fast */
-                free_xenheap_page(tmp);
-        }
-        parent = maddr_to_virt(dma_pte_addr(*pte));
+                spin_unlock_irqrestore(&hd->mapping_lock, flags);
+                return NULL;
+            }
+        }
+
+        if ( parent != hd->pgd )
+            unmap_domain_page(parent);
+
+        if ( level == 2 && vaddr )
+        {
+            unmap_domain_page(vaddr);
+            break;
+        }
+
+        parent = (struct dma_pte *)vaddr;
+        vaddr = NULL;
         level--;
     }
+
     spin_unlock_irqrestore(&hd->mapping_lock, flags);
-    return pte;
-}
-
-/* return address's pte at specific level */
-static struct dma_pte *dma_addr_level_pte(struct domain *domain, u64 addr,
-                                          int level)
+    return pg;
+}
+
+/* return address's page at specific level */
+static struct page_info *dma_addr_level_page(struct domain *domain,
+                                             u64 addr, int level)
 {
     struct hvm_iommu *hd = domain_hvm_iommu(domain);
     struct dma_pte *parent, *pte = NULL;
     int total = agaw_to_level(hd->agaw);
     int offset;
+    struct page_info *pg = NULL;

     parent = hd->pgd;
     while ( level <= total )
     {
         offset = address_level_offset(addr, total);
         pte = &parent[offset];
+        if ( dma_pte_addr(*pte) == 0 )
+        {
+            if ( parent != hd->pgd )
+                unmap_domain_page(parent);
+            break;
+        }
+
+        pg = maddr_to_page(pte->val);
+        if ( parent != hd->pgd )
+            unmap_domain_page(parent);
+
         if ( level == total )
-            return pte;
-
-        if ( dma_pte_addr(*pte) == 0 )
-            break;
-        parent = maddr_to_virt(dma_pte_addr(*pte));
+            return pg;
+
+        parent = map_domain_page(mfn_x(page_to_mfn(pg)));
         total--;
     }
+
     return NULL;
 }
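The structural change in these two walkers is the move away from xenheap tables that are permanently mapped (and reachable via maddr_to_virt()) to domheap pages that must be mapped with map_domain_page() on every visit and unmapped before descending; only the top level, hd->pgd, stays permanently mapped. A toy model of that discipline (map_table/unmap_table are stand-ins for Xen's map_domain_page()/unmap_domain_page(), and the top level is unmapped here too for brevity):

    #include <stdlib.h>

    #define ENTRIES_PER_TABLE 512

    typedef struct table { struct table *next[ENTRIES_PER_TABLE]; } table_t;

    /* Stand-ins: a real implementation creates/destroys a temporary
     * virtual mapping of the page here. */
    static table_t *map_table(table_t *t) { return t; }
    static void unmap_table(table_t *t) { (void)t; }

    /* Walk 'levels' levels, keeping at most one table mapped at a time. */
    static table_t *walk(table_t *root, const unsigned *idx, int levels)
    {
        table_t *parent = map_table(root);
        int l;

        for ( l = 0; l < levels; l++ )
        {
            table_t *child = parent->next[idx[l]];
            unmap_table(parent);        /* release before descending */
            if ( child == NULL )
                return NULL;
            parent = map_table(child);
        }
        return parent;                  /* caller unmaps when finished */
    }

    int main(void)
    {
        table_t *root = calloc(1, sizeof(*root));
        table_t *leaf = calloc(1, sizeof(*leaf));
        unsigned idx[1] = { 7 };

        root->next[7] = leaf;
        return (walk(root, idx, 1) == leaf) ? 0 : 1;
    }

The same reasoning explains why the callers below now receive a struct page_info and map it themselves: a struct dma_pte pointer would only stay valid while the page is mapped.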
@@ -506,12 +534,16 @@ static void dma_pte_clear_one(struct dom
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *pte = NULL;
+    struct page_info *pg = NULL;

     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);

     /* get last level pte */
-    pte = dma_addr_level_pte(domain, addr, 1);
-
+    pg = dma_addr_level_page(domain, addr, 1);
+    if ( !pg )
+        return;
+    pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+    pte += address_level_offset(addr, 1);
     if ( pte )
     {
         dma_clear_pte(*pte);
@@ -559,6 +591,7 @@ void dma_pte_free_pagetable(struct domai
     int total = agaw_to_level(hd->agaw);
     int level;
     u32 tmp;
+    struct page_info *pg = NULL;

     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -576,13 +609,16 @@ void dma_pte_free_pagetable(struct domai

         while ( tmp < end )
         {
-            pte = dma_addr_level_pte(domain, tmp, level);
-            if ( pte )
-            {
-                free_xenheap_page((void *) maddr_to_virt(dma_pte_addr(*pte)));
-                dma_clear_pte(*pte);
-                iommu_flush_cache_entry(iommu, pte);
-            }
+            pg = dma_addr_level_page(domain, tmp, level);
+            if ( !pg )
+                return;
+            pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+            pte += address_level_offset(tmp, level);
+            dma_clear_pte(*pte);
+            iommu_flush_cache_entry(iommu, pte);
+            unmap_domain_page(pte);
+            free_domheap_page(pg);
+
             tmp += level_size(level);
         }
         level++;
@@ -1445,6 +1481,7 @@ int iommu_map_page(struct domain *d, pad
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
     struct dma_pte *pte = NULL;
+    struct page_info *pg = NULL;

     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -1453,12 +1490,15 @@ int iommu_map_page(struct domain *d, pad
     if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
         return 0;

-    pte = addr_to_dma_pte(d, gfn << PAGE_SHIFT_4K);
-    if ( !pte )
+    pg = addr_to_dma_page(d, gfn << PAGE_SHIFT_4K);
+    if ( !pg )
         return -ENOMEM;
+    pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+    pte += mfn & LEVEL_MASK;
     dma_set_pte_addr(*pte, mfn << PAGE_SHIFT_4K);
     dma_set_pte_prot(*pte, DMA_PTE_READ | DMA_PTE_WRITE);
     iommu_flush_cache_entry(iommu, pte);
+    unmap_domain_page(pte);

     for_each_drhd_unit ( drhd )
     {
@@ -1477,7 +1517,6 @@ int iommu_unmap_page(struct domain *d, d
 {
     struct acpi_drhd_unit *drhd;
     struct iommu *iommu;
-    struct dma_pte *pte = NULL;

     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -1486,10 +1525,8 @@ int iommu_unmap_page(struct domain *d, d
     if ( ecap_pass_thru(iommu->ecap) && (d->domain_id == 0) )
         return 0;

-    /* get last level pte */
-    pte = dma_addr_level_pte(d, gfn << PAGE_SHIFT_4K, 1);
     dma_pte_clear_one(d, gfn << PAGE_SHIFT_4K);
-
+
     return 0;
 }

@@ -1501,6 +1538,7 @@ int iommu_page_mapping(struct domain *do
     unsigned long start_pfn, end_pfn;
     struct dma_pte *pte = NULL;
     int index;
+    struct page_info *pg = NULL;

     drhd = list_entry(acpi_drhd_units.next, typeof(*drhd), list);
     iommu = drhd->iommu;
@@ -1513,12 +1551,15 @@ int iommu_page_mapping(struct domain *do
     index = 0;
     while ( start_pfn < end_pfn )
     {
-        pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index);
-        if ( !pte )
+        pg = addr_to_dma_page(domain, iova + PAGE_SIZE_4K * index);
+        if ( !pg )
             return -ENOMEM;
+        pte = (struct dma_pte *)map_domain_page(mfn_x(page_to_mfn(pg)));
+        pte += start_pfn & LEVEL_MASK;
         dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K);
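Throughout these hunks the slot inside a freshly mapped table page is picked with expressions like address_level_offset(addr, 1) or a mask such as `pfn & LEVEL_MASK'. Assuming the usual VT-d layout of 4K pages and 9 index bits per level (so LEVEL_MASK is 0x1ff; level_to_offset_bits() itself is not shown in this patch, so the stride is an assumption here), the arithmetic works out as in this sketch:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT_4K 12
    #define LEVEL_STRIDE  9                           /* assumed */
    #define LEVEL_MASK    ((1UL << LEVEL_STRIDE) - 1) /* 0x1ff */

    /* Index of the table slot covering 'addr' at the given level;
     * level 1 uses address bits 12-20, level 2 bits 21-29, and so on. */
    static unsigned level_offset(uint64_t addr, int level)
    {
        return (addr >> (PAGE_SHIFT_4K + (level - 1) * LEVEL_STRIDE))
               & LEVEL_MASK;
    }

    int main(void)
    {
        uint64_t addr = 0x12345678000ULL;
        int l;

        for ( l = 1; l <= 4; l++ )
            printf("level %d slot = %u\n", l, level_offset(addr, l));
        return 0;
    }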
         dma_set_pte_prot(*pte, prot);
         iommu_flush_cache_entry(iommu, pte);
+        unmap_domain_page(pte);

         start_pfn++;
         index++;
     }
@@ -1537,12 +1578,8 @@ int iommu_page_mapping(struct domain *do

 int iommu_page_unmapping(struct domain *domain, dma_addr_t addr, size_t size)
 {
-    struct dma_pte *pte = NULL;
-
-    /* get last level pte */
-    pte = dma_addr_level_pte(domain, addr, 1);
     dma_pte_clear_range(domain, addr, addr + size);
-
+
     return 0;
 }
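Note that the sense of the boot option flips with this change: the old boolean_param("hap", opt_hap_enabled) in paging.c kept HAP off unless explicitly requested, while the new invbool_param("hap", opt_hap_disabled) in hvm.c enables HAP by default whenever the vendor code reports hap_supported, leaving `hap=0' (or a similar boolean spelling) to turn it off. An illustrative GRUB stanza (paths and kernel names are examples only):

    kernel /boot/xen.gz hap=0
    module /boot/vmlinuz-2.6-xen root=/dev/sda1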
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/arch/x86/mm/paging.c
--- a/xen/arch/x86/mm/paging.c  Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/arch/x86/mm/paging.c  Mon Oct 01 13:59:37 2007 -0600
@@ -28,9 +28,7 @@
 #include <asm/guest_access.h>
 #include <xsm/xsm.h>

-/* Xen command-line option to enable hardware-assisted paging */
-int opt_hap_enabled;
-boolean_param("hap", opt_hap_enabled);
+#define hap_enabled(d) (hvm_funcs.hap_supported && is_hvm_domain(d))

 /* Printouts */
 #define PAGING_PRINTK(_f, _a...)                                     \
@@ -363,14 +361,14 @@ void paging_domain_init(struct domain *d
     shadow_domain_init(d);

     /* ... but we will use hardware assistance if it's available. */
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
         hap_domain_init(d);
 }

 /* vcpu paging struct initialization goes here */
 void paging_vcpu_init(struct vcpu *v)
 {
-    if ( opt_hap_enabled && is_hvm_vcpu(v) )
+    if ( hap_enabled(v->domain) )
         hap_vcpu_init(v);
     else
         shadow_vcpu_init(v);
@@ -434,7 +432,7 @@ int paging_domctl(struct domain *d, xen_
     }

     /* Here, dispatch domctl to the appropriate paging code */
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
         return hap_domctl(d, sc, u_domctl);
     else
         return shadow_domctl(d, sc, u_domctl);
@@ -443,7 +441,7 @@ int paging_domctl(struct domain *d, xen_
 /* Call when destroying a domain */
 void paging_teardown(struct domain *d)
 {
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
         hap_teardown(d);
     else
         shadow_teardown(d);
@@ -455,7 +453,7 @@ void paging_teardown(struct domain *d)
 /* Call once all of the references to the domain have gone away */
 void paging_final_teardown(struct domain *d)
 {
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
         hap_final_teardown(d);
     else
         shadow_final_teardown(d);
@@ -465,7 +463,7 @@ void paging_final_teardown(struct domain
  * creation. */
 int paging_enable(struct domain *d, u32 mode)
 {
-    if ( opt_hap_enabled && is_hvm_domain(d) )
+    if ( hap_enabled(d) )
         return hap_enable(d, mode | PG_HAP_enable);
     else
         return shadow_enable(d, mode | PG_SH_enable);
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/include/asm-x86/hvm/hvm.h
--- a/xen/include/asm-x86/hvm/hvm.h     Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/include/asm-x86/hvm/hvm.h     Mon Oct 01 13:59:37 2007 -0600
@@ -71,6 +71,9 @@ enum hvm_intack {
  */
 struct hvm_function_table {
     char *name;
+
+    /* Support Hardware-Assisted Paging? */
+    int hap_supported;

     /*
      * Initialise/destroy HVM domain/vcpu resources
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/include/asm-x86/hvm/support.h Mon Oct 01 13:59:37 2007 -0600
@@ -234,6 +234,7 @@ void hvm_hlt(unsigned long rflags);
 void hvm_hlt(unsigned long rflags);
 void hvm_triple_fault(void);

+int hvm_set_efer(uint64_t value);
 int hvm_set_cr0(unsigned long value);
 int hvm_set_cr3(unsigned long value);
 int hvm_set_cr4(unsigned long value);
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/include/xsm/acm/acm_core.h
--- a/xen/include/xsm/acm/acm_core.h    Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/include/xsm/acm/acm_core.h    Mon Oct 01 13:59:37 2007 -0600
@@ -154,7 +154,7 @@ static inline int acm_array_append_tuple

 /* protos */
 int acm_init_domain_ssid(struct domain *, ssidref_t ssidref);
-void acm_free_domain_ssid(struct acm_ssid_domain *ssid);
+void acm_free_domain_ssid(struct domain *);
 int acm_init_binary_policy(u32 policy_code);
 int acm_set_policy(XEN_GUEST_HANDLE_64(void) buf, u32 buf_size);
 int do_acm_set_policy(void *buf, u32 buf_size, int is_bootpolicy,
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/include/xsm/acm/acm_hooks.h
--- a/xen/include/xsm/acm/acm_hooks.h   Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/include/xsm/acm/acm_hooks.h   Mon Oct 01 13:59:37 2007 -0600
@@ -258,7 +258,7 @@ static inline void acm_domain_destroy(st
             acm_secondary_ops->domain_destroy(ssid, d);
         /* free security ssid for the destroyed domain (also if null policy */
         acm_domain_ssid_off_list(ssid);
-        acm_free_domain_ssid((struct acm_ssid_domain *)(ssid));
+        acm_free_domain_ssid(d);
     }
 }

@@ -294,7 +294,7 @@ static inline int acm_domain_create(stru
     {
         acm_domain_ssid_onto_list(d->ssid);
     } else {
-        acm_free_domain_ssid(d->ssid);
+        acm_free_domain_ssid(d);
     }

 error_out:
diff -r d3665dc74a41 -r f71b7d6ad5d8 xen/xsm/acm/acm_core.c
--- a/xen/xsm/acm/acm_core.c    Mon Oct 01 09:59:24 2007 -0600
+++ b/xen/xsm/acm/acm_core.c    Mon Oct 01 13:59:37 2007 -0600
@@ -361,7 +361,7 @@ int acm_init_domain_ssid(struct domain *
     {
         printk("%s: ERROR instantiating individual ssids for domain 0x%02x.\n",
                __func__, subj->domain_id);
-        acm_free_domain_ssid(ssid);
+        acm_free_domain_ssid(subj);
         return ACM_INIT_SSID_ERROR;
     }

@@ -372,8 +372,10 @@ int acm_init_domain_ssid(struct domain *

 void
-acm_free_domain_ssid(struct acm_ssid_domain *ssid)
-{
+acm_free_domain_ssid(struct domain *d)
+{
+    struct acm_ssid_domain *ssid = d->ssid;
+
     /* domain is already gone, just ssid is left */
     if (ssid == NULL)
         return;
@@ -387,6 +389,8 @@ acm_free_domain_ssid(struct acm_ssid_dom
     ssid->secondary_ssid = NULL;

     xfree(ssid);
+    d->ssid = NULL;
+
     printkd("%s: Freed individual domain ssid (domain=%02x).\n",
             __func__, id);
 }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog