[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 00 of 25] libxc: Hypercall buffers



On Tue, 2010-10-26 at 16:37 +0100, Olaf Hering wrote:
> Ian,
> 
> is the usage like shown below ok, adding an offset +start to the
> initial buffer? 

Unfortunately I think you need to extend the infrastructure to make it
work.

es is a pointer to the unbounced buffer, so offsets you take from it
apply to the original memory, not to the bounce buffer the hypercall
actually sees.

I think you need to add a set_xen_guest_handle_offset, used as
        set_xen_guest_handle_offset(reservation->extent_start, es, start)
or similar. (I think this actually makes es redundant: you can bounce
extent_start itself and pass it straight to the hypercall.)

Ian.

> Olaf
> 
> --- xen-unstable.hg-4.1.22313.orig/tools/libxc/xc_domain.c
> +++ xen-unstable.hg-4.1.22313/tools/libxc/xc_domain.c
> @@ -572,6 +572,55 @@ int xc_domain_get_tsc_info(xc_interface
>      return rc;
>  }
>  
> +static int do_xenmem_op_retry(xc_interface *xch, int cmd, struct 
> xen_memory_reservation *reservation, size_t len, unsigned long nr_extents, 
> xen_pfn_t *extent_start)
> +{
> +    xen_pfn_t *es = extent_start;
> +    DECLARE_HYPERCALL_BOUNCE(es, nr_extents * sizeof(*es), 
> XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
> +    int err = 0;
> +    unsigned long count = nr_extents;
> +    unsigned long delay = 0;
> +    unsigned long start = 0;
> +
> +    fprintf(stderr, "%s: %d count %lx\n",__func__,cmd,count);
> +
> +    if ( xc_hypercall_bounce_pre(xch, es) )
> +    {
> +        PERROR("Could not bounce memory for XENMEM_* hypercall");
> +        return -1;
> +    }
> +
> +    while ( start < nr_extents )
> +    {
> +        es = extent_start + start;
> +        set_xen_guest_handle(reservation->extent_start, es);
> +        reservation->nr_extents = count;
> +
> +        err = do_memory_op(xch, cmd, reservation, len);
> +        if ( err == count )
> +            break;
> +
> +        if ( err > count || err < 0 )
> +            break;
> +
> +        if ( delay > 1000 * 1000)
> +        {
> +            err = start;
> +            break;
> +        }
> +
> +        if ( err )
> +            delay = 0;
> +
> +        start += err;
> +        count -= err;
> +        usleep(delay);
> +        delay += 666; /* 1500 iterations, 12 seconds */
> +    }
> +    fprintf(stderr, "%s: %d err %x count %lx start %lx delay 
> %lu/%lu\n",__func__,cmd,err,count,start,delay,delay/666);
> +
> +    xc_hypercall_bounce_post(xch, es);
> +    return err;
> +}
>  
>  int xc_domain_maximum_gpfn(xc_interface *xch, domid_t domid)
>  {
> @@ -643,10 +692,7 @@ int xc_domain_decrease_reservation(xc_in
>                                     unsigned int extent_order,
>                                     xen_pfn_t *extent_start)
>  {
> -    int err;
> -    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * 
> sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
>      struct xen_memory_reservation reservation = {
> -        .nr_extents   = nr_extents,
>          .extent_order = extent_order,
>          .mem_flags    = 0,
>          .domid        = domid
> @@ -659,18 +705,7 @@ int xc_domain_decrease_reservation(xc_in
>          return -1;
>      }
>  
> -    if ( xc_hypercall_bounce_pre(xch, extent_start) )
> -    {
> -        PERROR("Could not bounce memory for XENMEM_decrease_reservation 
> hypercall");
> -        return -1;
> -    }
> -    set_xen_guest_handle(reservation.extent_start, extent_start);
> -
> -    err = do_memory_op(xch, XENMEM_decrease_reservation, &reservation, 
> sizeof(reservation));
> -
> -    xc_hypercall_bounce_post(xch, extent_start);
> -
> -    return err;
> +    return do_xenmem_op_retry(xch, XENMEM_decrease_reservation, 
> &reservation, sizeof(reservation), nr_extents, extent_start);
>  }
>  
>  int xc_domain_decrease_reservation_exact(xc_interface *xch,
> @@ -704,13 +739,20 @@ int xc_domain_add_to_physmap(xc_interfac
>                               unsigned long idx,
>                               xen_pfn_t gpfn)
>  {
> +    uint8_t delay = 0;
> +    int rc;
>      struct xen_add_to_physmap xatp = {
>          .domid = domid,
>          .space = space,
>          .idx = idx,
>          .gpfn = gpfn,
>      };
> -    return do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
> +    do {
> +        rc = do_memory_op(xch, XENMEM_add_to_physmap, &xatp, sizeof(xatp));
> +        if ( rc < 0 && errno == ENOENT )
> +            usleep(1000);
> +    } while ( rc < 0 && errno == ENOENT && ++delay );
> +    return rc;
>  }
>  
>  int xc_domain_populate_physmap(xc_interface *xch,
> @@ -720,26 +762,13 @@ int xc_domain_populate_physmap(xc_interf
>                                 unsigned int mem_flags,
>                                 xen_pfn_t *extent_start)
>  {
> -    int err;
> -    DECLARE_HYPERCALL_BOUNCE(extent_start, nr_extents * 
> sizeof(*extent_start), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
>      struct xen_memory_reservation reservation = {
> -        .nr_extents   = nr_extents,
>          .extent_order = extent_order,
>          .mem_flags    = mem_flags,
>          .domid        = domid
>      };
>  
> -    if ( xc_hypercall_bounce_pre(xch, extent_start) )
> -    {
> -        PERROR("Could not bounce memory for XENMEM_populate_physmap 
> hypercall");
> -        return -1;
> -    }
> -    set_xen_guest_handle(reservation.extent_start, extent_start);
> -
> -    err = do_memory_op(xch, XENMEM_populate_physmap, &reservation, 
> sizeof(reservation));
> -
> -    xc_hypercall_bounce_post(xch, extent_start);
> -    return err;
> +    return do_xenmem_op_retry(xch, XENMEM_populate_physmap, &reservation, 
> sizeof(reservation), nr_extents, extent_start);
>  }
>  
>  int xc_domain_populate_physmap_exact(xc_interface *xch,
> @@ -799,6 +828,7 @@ int xc_domain_memory_exchange_pages(xc_i
>      set_xen_guest_handle(exchange.in.extent_start, in_extents);
>      set_xen_guest_handle(exchange.out.extent_start, out_extents);
>  
> +    /* FIXME use do_xenmem_op_retry or some retry loop??? */
>      rc = do_memory_op(xch, XENMEM_exchange, &exchange, sizeof(exchange));
>  
>  out:



_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.