Re: [Xen-devel] [PATCH] Fix ioreq-server event channel vulnerability
> -----Original Message-----
> From: Andrew Cooper
> Sent: 14 July 2014 14:53
> To: Paul Durrant; xen-devel@xxxxxxxxxxxxx
> Cc: Keir (Xen.org); Jan Beulich
> Subject: Re: [PATCH] Fix ioreq-server event channel vulnerability
>
> On 14/07/14 11:21, Paul Durrant wrote:
> > The code in hvm_send_assist_req_to_ioreq_server() and hvm_do_resume() uses
> > an event channel port number taken from the page of memory shared with the
> > emulator. This allows an emulator to corrupt values that are then blindly
> > used by Xen, leading to assertion failures in some cases. Moreover, in the
> > case of the default ioreq server the page remains in the guest p2m so a
> > malicious guest could similarly corrupt those values.
> >
> > This patch changes the aforementioned functions to get the event channel
> > port number from an internal structure and also adds an extra check to
> > hvm_send_assist_req_to_ioreq_server() which will crash the domain should the
> > guest or an emulator corrupt the port number in the shared page.
> >
> > Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
> > Reported-by: Wen Congyang <wency@xxxxxxxxxxxxxx>
> > Cc: Keir Fraser <keir@xxxxxxx>
> > Cc: Jan Beulich <jbeulich@xxxxxxxx>
> > Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> > ---
> > xen/arch/x86/hvm/hvm.c | 112 ++++++++++++++++++++++++++++++++----------------
> > 1 file changed, 75 insertions(+), 37 deletions(-)
> >
> > diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> > index ef2411c..9a09ee1 100644
> > --- a/xen/arch/x86/hvm/hvm.c
> > +++ b/xen/arch/x86/hvm/hvm.c
> > @@ -392,6 +392,31 @@ bool_t hvm_io_pending(struct vcpu *v)
> > return 0;
> > }
> >
> > +static void hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
> > +{
> > + /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > + while ( p->state != STATE_IOREQ_NONE )
> > + {
> > + switch ( p->state )
> > + {
> > + case STATE_IORESP_READY: /* IORESP_READY -> NONE */
> > + rmb(); /* see IORESP_READY /then/ read contents of ioreq */
> > + hvm_io_assist(p);
> > + break;
> > + case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
> > + case STATE_IOREQ_INPROCESS:
> > + wait_on_xen_event_channel(sv->ioreq_evtchn,
> > + (p->state != STATE_IOREQ_READY) &&
> > + (p->state != STATE_IOREQ_INPROCESS));
> > + break;
> > + default:
> > + gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
> > + domain_crash(sv->vcpu->domain);
> > + return; /* bail */
> > + }
> > + }
> > +}
> > +
> > void hvm_do_resume(struct vcpu *v)
> > {
> > struct domain *d = v->domain;
> > @@ -406,27 +431,17 @@ void hvm_do_resume(struct vcpu *v)
> > &d->arch.hvm_domain.ioreq_server.list,
> > list_entry )
> > {
> > - ioreq_t *p = get_ioreq(s, v);
> > + struct hvm_ioreq_vcpu *sv;
> >
> > - /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
> > - while ( p->state != STATE_IOREQ_NONE )
> > + list_for_each_entry ( sv,
> > + &s->ioreq_vcpu_list,
> > + list_entry )
> > {
> > - switch ( p->state )
> > + if ( sv->vcpu == v )
> > {
> > - case STATE_IORESP_READY: /* IORESP_READY -> NONE */
> > - rmb(); /* see IORESP_READY /then/ read contents of ioreq */
> > - hvm_io_assist(p);
> > - break;
> > - case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
> > - case STATE_IOREQ_INPROCESS:
> > - wait_on_xen_event_channel(p->vp_eport,
> > - (p->state != STATE_IOREQ_READY) &&
> > - (p->state != STATE_IOREQ_INPROCESS));
> > - break;
> > - default:
> > - gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
> > - domain_crash(d);
> > - return; /* bail */
>
> By pulling this return into hvm_wait_for_io(), you now continue through
> hvm_do_resume() even after deciding to crash the domain. That changes the
> error behaviour, and doesn't look like it will interact well with the
> hvm_inject_trap() handling further down (outside the diff context). You
> probably want a boolean return from hvm_wait_for_io().
Yes, that's a good point. I do need to bail.
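
Something like this, perhaps (untested sketch; identical to the function above
apart from the bool_t return, with 0 meaning the domain has been crashed and
the caller should bail):

  static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
  {
      /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
      while ( p->state != STATE_IOREQ_NONE )
      {
          switch ( p->state )
          {
          case STATE_IORESP_READY: /* IORESP_READY -> NONE */
              rmb(); /* see IORESP_READY /then/ read contents of ioreq */
              hvm_io_assist(p);
              break;
          case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
          case STATE_IOREQ_INPROCESS:
              wait_on_xen_event_channel(sv->ioreq_evtchn,
                                        (p->state != STATE_IOREQ_READY) &&
                                        (p->state != STATE_IOREQ_INPROCESS));
              break;
          default:
              gdprintk(XENLOG_ERR, "Weird HVM iorequest state %d.\n", p->state);
              domain_crash(sv->vcpu->domain);
              return 0; /* bail */
          }
      }

      return 1;
  }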
>
> > + ioreq_t *p = get_ioreq(s, v);
> > +
> > + hvm_wait_for_io(sv, p);
>
> You presumably want to break at this point, or you will pointlessly
> continue the list_for_each_entry() loop. You can also get away with
> folding these 3 lines together and doing away with 'p'.
Yes, the inner loop should be exited.
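I.e. something along these lines in hvm_do_resume() (untested sketch, assuming
the bool_t-returning hvm_wait_for_io() above; 'p' is folded away as suggested):

  list_for_each_entry ( sv,
                        &s->ioreq_vcpu_list,
                        list_entry )
  {
      if ( sv->vcpu == v )
      {
          if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
              return;

          break;
      }
  }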
Thanks,
Paul
>
> ~Andrew
>
> > }
> > }
> > }
> > @@ -2545,35 +2560,58 @@ bool_t hvm_send_assist_req_to_ioreq_server(struct hvm_ioreq_server *s,
> > {
> > struct vcpu *curr = current;
> > struct domain *d = curr->domain;
> > - ioreq_t *p;
> > + struct hvm_ioreq_vcpu *sv;
> >
> > if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
> > return 0; /* implicitly bins the i/o operation */
> >
> > - p = get_ioreq(s, curr);
> > -
> > - if ( unlikely(p->state != STATE_IOREQ_NONE) )
> > + list_for_each_entry ( sv,
> > + &s->ioreq_vcpu_list,
> > + list_entry )
> > {
> > - /* This indicates a bug in the device model. Crash the domain. */
> > - gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p-
> >state);
> > - domain_crash(d);
> > - return 0;
> > - }
> > + if ( sv->vcpu == curr )
> > + {
> > + evtchn_port_t port = sv->ioreq_evtchn;
> > + ioreq_t *p = get_ioreq(s, curr);
> >
> > - proto_p->state = STATE_IOREQ_NONE;
> > - proto_p->vp_eport = p->vp_eport;
> > - *p = *proto_p;
> > + if ( unlikely(p->state != STATE_IOREQ_NONE) )
> > + {
> > + gdprintk(XENLOG_ERR,
> > + "Device model set bad IO state %d.\n",
> > + p->state);
> > + goto crash;
> > + }
> >
> > - prepare_wait_on_xen_event_channel(p->vp_eport);
> > + if ( unlikely(p->vp_eport != port) )
> > + {
> > + gdprintk(XENLOG_ERR,
> > + "Device model set bad event channel %d.\n",
> > + p->vp_eport);
> > + goto crash;
> > + }
> >
> > - /*
> > - * Following happens /after/ blocking and setting up ioreq contents.
> > - * prepare_wait_on_xen_event_channel() is an implicit barrier.
> > - */
> > - p->state = STATE_IOREQ_READY;
> > - notify_via_xen_event_channel(d, p->vp_eport);
> > + proto_p->state = STATE_IOREQ_NONE;
> > + proto_p->vp_eport = port;
> > + *p = *proto_p;
> > +
> > + prepare_wait_on_xen_event_channel(port);
> > +
> > + /*
> > + * Following happens /after/ blocking and setting up ioreq
> > + * contents. prepare_wait_on_xen_event_channel() is an implicit
> > + * barrier.
> > + */
> > + p->state = STATE_IOREQ_READY;
> > + notify_via_xen_event_channel(d, port);
> > + break;
> > + }
> > + }
> >
> > return 1;
> > +
> > + crash:
> > + domain_crash(d);
> > + return 0;
> > }
> >
> > bool_t hvm_send_assist_req(ioreq_t *p)
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel