
Re: [Xen-devel] [PATCH 3/4] xen: introduce an event channel for buffered io event notifications



On Tue, 2011-11-15 at 14:51 +0000, stefano.stabellini@xxxxxxxxxxxxx wrote:
> From: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> 
> Use the newly introduced HVM_PARAM_BUFIOREQ_EVTCHN to receive
> notifications for buffered io events.
> After the first notification is received leave the event channel masked
> and setup a timer to process the rest of the batch.
> Once we have completed processing the batch, unmask the event channel
> and delete the timer.
> 
> Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> ---
>  xen-all.c |   38 ++++++++++++++++++++++++++++++++------
>  1 files changed, 32 insertions(+), 6 deletions(-)
> 
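
Just to make sure I follow the scheme (IIRC the evtchn device leaves a
port masked once xc_evtchn_pending() has returned it, until it is
explicitly unmasked, so the "leave the event channel masked" part falls
out of that), the flow is roughly this -- my paraphrase, not the patch
itself:

    /* I/O thread: a buffered-io notification arrives */
    port = xc_evtchn_pending(state->xce_handle);   /* port is now masked */
    if (port == state->bufioreq_local_port) {
        /* don't unmask yet, just kick off the drain timer */
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    }

    /* timer callback: drain the buffered-io ring in batches */
    if (handle_buffered_iopage(state)) {
        /* still work left, keep the timer running */
        qemu_mod_timer(state->buffered_io_timer,
                       BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    } else {
        /* ring drained: stop the timer and re-enable notifications */
        qemu_del_timer(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
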
> diff --git a/xen-all.c b/xen-all.c
> index b5e28ab..b28d7e7 100644
> --- a/xen-all.c
> +++ b/xen-all.c
> @@ -70,6 +70,8 @@ typedef struct XenIOState {
>      QEMUTimer *buffered_io_timer;
>      /* the evtchn port for polling the notification, */
>      evtchn_port_t *ioreq_local_port;
> +    /* evtchn local port for buffered io */
> +    evtchn_port_t bufioreq_local_port;
>      /* the evtchn fd for polling */
>      XenEvtchn xce_handle;
>      /* which vcpu we are serving */
> @@ -516,6 +518,12 @@ static ioreq_t *cpu_get_ioreq(XenIOState *state)
>      evtchn_port_t port;
>  
>      port = xc_evtchn_pending(state->xce_handle);
> +    if (port == state->bufioreq_local_port) {
> +        qemu_mod_timer(state->buffered_io_timer,
> +                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
> +        return NULL;
> +    }
> +
>      if (port != -1) {
>          for (i = 0; i < smp_cpus; i++) {
>              if (state->ioreq_local_port[i] == port) {
> @@ -664,16 +672,18 @@ static void handle_ioreq(ioreq_t *req)
>      }
>  }
>  
> -static void handle_buffered_iopage(XenIOState *state)
> +static int handle_buffered_iopage(XenIOState *state)
>  {
>      buf_ioreq_t *buf_req = NULL;
>      ioreq_t req;
>      int qw;
>  
>      if (!state->buffered_io_page) {
> -        return;
> +        return 0;
>      }
>  
> +    memset(&req, 0x00, sizeof(req));
> +
>      while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
>          buf_req = &state->buffered_io_page->buf_ioreq[
>              state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
> @@ -698,15 +708,21 @@ static void handle_buffered_iopage(XenIOState *state)
>          xen_mb();
>          state->buffered_io_page->read_pointer += qw ? 2 : 1;
>      }
> +
> +    return req.count;
>  }
>  
>  static void handle_buffered_io(void *opaque)
>  {
>      XenIOState *state = opaque;
>  
> -    handle_buffered_iopage(state);
> -    qemu_mod_timer(state->buffered_io_timer,
> -                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
> +    if (handle_buffered_iopage(state)) {
> +        qemu_mod_timer(state->buffered_io_timer,
> +                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
> +    } else {
> +        qemu_del_timer(state->buffered_io_timer);
> +        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
> +    }
>  }
>  
>  static void cpu_handle_ioreq(void *opaque)
> @@ -836,7 +852,6 @@ static void xen_main_loop_prepare(XenIOState *state)
>  
>      state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
>                                                   state);
> -    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));
>  
>      if (evtchn_fd != -1) {
>          qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
> @@ -888,6 +903,7 @@ int xen_hvm_init(void)
>  {
>      int i, rc;
>      unsigned long ioreq_pfn;
> +    unsigned long bufioreq_evtchn;
>      XenIOState *state;
>  
>      state = g_malloc0(sizeof (XenIOState));
> @@ -937,6 +953,16 @@ int xen_hvm_init(void)
>          state->ioreq_local_port[i] = rc;
>      }
>  
> +    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
> +            &bufioreq_evtchn);
> +    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
> +            (uint32_t)bufioreq_evtchn);
> +    if (rc == -1) {
> +        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
> +        return -1;
> +    }
> +    state->bufioreq_local_port = rc;

Does this fall back gracefully on hypervisors which don't have this new
HVM param? It doesn't look like it, but perhaps I'm missing something.
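
Something along these lines (completely untested, and assuming
xc_get_hvm_param() fails with rc < 0 when the hypervisor doesn't know
the param index) would let older hypervisors keep the existing
timer-only polling:

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
                          &bufioreq_evtchn);
    if (rc < 0) {
        /* Older hypervisor without HVM_PARAM_BUFIOREQ_EVTCHN: mark the
         * port invalid so cpu_get_ioreq() never matches it and keep the
         * old timer-driven polling of the buffered-io page. */
        state->bufioreq_local_port = (evtchn_port_t)-1;
    } else {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        (uint32_t)bufioreq_evtchn);
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->bufioreq_local_port = rc;
    }

xen_main_loop_prepare()/handle_buffered_io() would then also need to
keep the unconditional qemu_mod_timer() re-arm (and skip the
xc_evtchn_unmask()) whenever bufioreq_local_port is invalid.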

> +
>      /* Init RAM management */
>      xen_map_cache_init();
>      xen_ram_init(ram_size);


