[Xen-devel] [PATCH 4/6] VMX: correct PDPTE load checks
Checking the low 5 bits of CR3 is not the job of vmx_load_pdptrs().
Instead, the function should raise #GP when it encounters bad PDPTE
values, rather than causing a VM entry failure.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
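
As context for review, here is a minimal standalone sketch (not part of
the patch) of the check the new code performs: a PAE PDPTE is acceptable
only if it is either not present or has no bits set outside the valid
mask, i.e. the present bit, the software-available bits, and the address
bits up to the guest's physical address width. The constants mirror Xen's
definitions; pdptes_valid() and its maxphysaddr parameter are hypothetical
names used here purely for illustration.

/* Sketch of the PDPTE validity check, mirroring the hunk below. */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT    12
#define PAGE_MASK     (~((1ULL << PAGE_SHIFT) - 1))
#define _PAGE_PRESENT (1ULL << 0)       /* bit 0: P */
#define _PAGE_AVAIL   (7ULL << 9)       /* bits 9-11: software-available */

/* Assumes maxphysaddr < 64; CPUID reports at most 52 on current CPUs. */
static bool pdptes_valid(const uint64_t pdptes[4], unsigned int maxphysaddr)
{
    /* Bits that may legitimately be set in a present PDPTE. */
    uint64_t valid_mask = ((1ULL << maxphysaddr) - 1) &
                          (PAGE_MASK | _PAGE_AVAIL | _PAGE_PRESENT);
    unsigned int i;

    for ( i = 0; i < 4; ++i )
        if ( (pdptes[i] & _PAGE_PRESENT) && (pdptes[i] & ~valid_mask) )
            return false;   /* reserved bit set: guest should see #GP(0) */

    return true;
}

Raising #GP(0) here matches what hardware does for a MOV to CR3 that
loads bad PDPTEs in PAE mode, which is why it is preferable to letting
the subsequent VM entry fail.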
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1361,20 +1361,18 @@ static void vmx_set_interrupt_shadow(str
     __vmwrite(GUEST_INTERRUPTIBILITY_INFO, intr_shadow);
 }
 
-static void vmx_load_pdptrs(struct vcpu *v)
+static bool vmx_load_pdptrs(struct vcpu *v)
 {
     unsigned long cr3 = v->arch.hvm_vcpu.guest_cr[3];
-    uint64_t *guest_pdptes;
+    uint64_t *guest_pdptes, valid_mask;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
+    unsigned int i;
 
     /* EPT needs to load PDPTRS into VMCS for PAE. */
     if ( !hvm_pae_enabled(v) || (v->arch.hvm_vcpu.guest_efer & EFER_LMA) )
-        return;
-
-    if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
-        goto crash;
+        return true;
 
     page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
     if ( !page )
@@ -1385,34 +1383,47 @@ static void vmx_load_pdptrs(struct vcpu
         gdprintk(XENLOG_ERR,
                  "Bad cr3 on load pdptrs gfn %lx type %d\n",
                  cr3 >> PAGE_SHIFT, (int) p2mt);
-        goto crash;
+        domain_crash(v->domain);
+        return false;
     }
 
     p = __map_domain_page(page);
 
-    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
+    guest_pdptes = (uint64_t *)(p + (cr3 & 0xfe0));
 
-    /*
-     * We do not check the PDPTRs for validity. The CPU will do this during
-     * vm entry, and we can handle the failure there and crash the guest.
-     * The only thing we could do better here is #GP instead.
-     */
+    valid_mask = ((1ULL << v->domain->arch.cpuid->extd.maxphysaddr) - 1) &
+                 (PAGE_MASK | _PAGE_AVAIL | _PAGE_PRESENT);
+    for ( i = 0; i < 4; ++i )
+        if ( (guest_pdptes[i] & _PAGE_PRESENT) &&
+             (guest_pdptes[i] & ~valid_mask) )
+        {
+            if ( v == current )
+                hvm_inject_hw_exception(TRAP_gp_fault, 0);
+            else
+            {
+                printk(XENLOG_G_ERR "%pv: bad PDPTE%u: %"PRIx64"\n",
+                       v, i, guest_pdptes[i]);
+                domain_crash(v->domain);
+            }
+            break;
+        }
 
-    vmx_vmcs_enter(v);
+    if ( i == 4 )
+    {
+        vmx_vmcs_enter(v);
 
-    __vmwrite(GUEST_PDPTE(0), guest_pdptes[0]);
-    __vmwrite(GUEST_PDPTE(1), guest_pdptes[1]);
-    __vmwrite(GUEST_PDPTE(2), guest_pdptes[2]);
-    __vmwrite(GUEST_PDPTE(3), guest_pdptes[3]);
+        __vmwrite(GUEST_PDPTE(0), guest_pdptes[0]);
+        __vmwrite(GUEST_PDPTE(1), guest_pdptes[1]);
+        __vmwrite(GUEST_PDPTE(2), guest_pdptes[2]);
+        __vmwrite(GUEST_PDPTE(3), guest_pdptes[3]);
 
-    vmx_vmcs_exit(v);
+        vmx_vmcs_exit(v);
+    }
 
     unmap_domain_page(p);
     put_page(page);
-    return;
 
- crash:
-    domain_crash(v->domain);
+    return i == 4;
 }
 
 static void vmx_update_host_cr3(struct vcpu *v)
@@ -1621,7 +1632,8 @@ static void vmx_update_guest_cr(struct v
             if ( !hvm_paging_enabled(v) && !vmx_unrestricted_guest(v) )
                 v->arch.hvm_vcpu.hw_cr[3] =
                     v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT];
-            vmx_load_pdptrs(v);
+            if ( !vmx_load_pdptrs(v) )
+                break;
         }
 
         __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);