[Xen-changelog] [xen stable-4.2] x86: properly handle hvm_copy_from_guest_{phys,virt}() errors
commit 62f395f6ae43a0f2c3d48db4ca2b2535cb6f490f
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Mon Sep 30 14:26:18 2013 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Sep 30 14:26:18 2013 +0200

    x86: properly handle hvm_copy_from_guest_{phys,virt}() errors

    Ignoring them generally implies using uninitialized data and, in all
    but two of the cases dealt with here, potentially leaking hypervisor
    stack contents to guests.

    This is CVE-2013-4355 / XSA-63.

    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Tim Deegan <tim@xxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    master commit: 6bb838e7375f5b031e9ac346b353775c90de45dc
    master date: 2013-09-30 14:17:46 +0200
---
 xen/arch/x86/hvm/hvm.c          | 18 ++++---------
 xen/arch/x86/hvm/intercept.c    | 49 +++++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/io.c           | 24 +++++++++++++-----
 xen/arch/x86/hvm/vmx/realmode.c |  6 ++--
 4 files changed, 66 insertions(+), 31 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7792ca8..02b9b55 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2247,11 +2247,7 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_virt(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    if ( rc == HVMCOPY_gfn_shared )
+    if ( rc != HVMCOPY_okay )
         goto out;
 
     eflags = regs->eflags;
@@ -2296,13 +2292,11 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_virt(
         &tss, tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    /* Note: this could be optimised, if the callee functions knew we want RO
-     * access */
-    if ( rc == HVMCOPY_gfn_shared )
+    /*
+     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
+     * functions knew we want RO access.
+     */
+    if ( rc != HVMCOPY_okay )
         goto out;
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index af22d11..2ccc39e 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *v,
     {
         for ( i = 0; i < p->count; i++ )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) ||
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
             {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 rc = X86EMUL_RETRY;
                 break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
             }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = write_handler(v, p->addr + (sign * i * p->size),
                                p->size, data);
             if ( rc != X86EMUL_OKAY )
@@ -165,8 +176,28 @@ static int process_portio_intercept(portio_action_t action, ioreq_t *p)
         for ( i = 0; i < p->count; i++ )
         {
             data = 0;
-            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
-                                           p->size);
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
+                rc = X86EMUL_RETRY;
+                break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
+            }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = action(IOREQ_WRITE, p->addr, p->size, &data);
             if ( rc != X86EMUL_OKAY )
                 break;
diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
index c20f4e8..7cf0d36 100644
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mport, ioreq_t *p)
         data = p->data;
         if ( p->data_is_ptr )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) &&
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 return X86EMUL_RETRY;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                return X86EMUL_UNHANDLEABLE;
+            }
         }
 
         switch ( p->size )
diff --git a/xen/arch/x86/hvm/vmx/realmode.c b/xen/arch/x86/hvm/vmx/realmode.c
index ce7a13b..a942b70 100644
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -39,7 +39,9 @@ static void realmode_deliver_exception(
 
  again:
     last_byte = (vector * 4) + 3;
-    if ( idtr->limit < last_byte )
+    if ( idtr->limit < last_byte ||
+         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
+         HVMCOPY_okay )
     {
         /* Software interrupt? */
         if ( insn_len != 0 )
@@ -64,8 +66,6 @@ static void realmode_deliver_exception(
         }
     }
 
-    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
-
     frame[0] = regs->eip + insn_len;
    frame[1] = csr->sel;
    frame[2] = regs->eflags & ~X86_EFLAGS_RF;
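For readers who want to see the resulting error-handling pattern in isolation, below is a minimal standalone sketch. It is not Xen code: the enum values, copy_from_guest(), emulate_write() and the scenario plumbing are hypothetical stand-ins, and only the shape of the switch mirrors the hunks above. The point it illustrates is that every copy outcome gets an explicit disposition (retry for the transient paged-out/shared states, all-ones data when the gfn has no backing memory, and a hard failure otherwise), so the destination buffer is never consumed while it still holds whatever happened to be on the stack.

/*
 * Standalone illustration only: not Xen code.  The enum values and the
 * helpers below are stand-ins; they merely mimic the shape of the
 * hvm_copy_from_guest_phys() handling added by the patch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum copy_result {           /* loosely mirrors the HVMCOPY_* values */
    COPY_okay,
    COPY_bad_gva_to_gfn,     /* translation fault (virtual copies only) */
    COPY_bad_gfn_to_mfn,     /* no backing memory behind that gfn */
    COPY_gfn_paged_out,      /* page must first be paged back in */
    COPY_gfn_shared,         /* page must first be unshared */
};

enum emul_result { EMUL_OKAY, EMUL_RETRY, EMUL_UNHANDLEABLE };

/* Stand-in for hvm_copy_from_guest_phys(): only fills *dst on success. */
static enum copy_result copy_from_guest(uint64_t *dst, int scenario)
{
    if ( scenario == COPY_okay )
        *dst = 0x1234;
    return (enum copy_result)scenario;
}

static enum emul_result emulate_write(int scenario)
{
    uint64_t data;           /* deliberately uninitialized, as in the old code */

    switch ( copy_from_guest(&data, scenario) )
    {
    case COPY_okay:
        break;
    case COPY_gfn_paged_out:
    case COPY_gfn_shared:
        return EMUL_RETRY;             /* transient: replay the emulation later */
    case COPY_bad_gfn_to_mfn:
        data = ~0ull;                  /* unbacked memory reads as all ones */
        break;
    case COPY_bad_gva_to_gfn:
        assert(0);                     /* cannot happen for physical copies */
        /* fall through */
    default:
        return EMUL_UNHANDLEABLE;
    }

    /* Only now is 'data' guaranteed to hold defined contents. */
    printf("scenario %d: forwarding %#llx\n", scenario, (unsigned long long)data);
    return EMUL_OKAY;
}

int main(void)
{
    for ( int s = COPY_okay; s <= COPY_gfn_shared; s++ )
        if ( s != COPY_bad_gva_to_gfn )      /* skip the assert()ing case */
            printf("scenario %d -> %d\n", s, emulate_write(s));
    return 0;
}

The data = ~0 disposition for HVMCOPY_bad_gfn_to_mfn mirrors what reads of unimplemented physical memory typically return (all ones), which lets the emulation continue instead of failing outright.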
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.2

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog