
Re: [Xen-devel] [PATCH v4 2/2] qdisk - hw/block/xen_disk: grant copy implementation



On Tue, Aug 02, 2016 at 04:06:30PM +0200, Paulina Szubarczyk wrote:
> Copy the data operated on during a request from/to local buffers
> to/from the grant references.
> 
> Before the grant copy operation, local buffers must be allocated,
> which is done by calling ioreq_init_copy_buffers. For the 'read'
> operation, the qemu device first invokes the read on the local
> buffers; on completion, the grant copy is performed and the buffers
> are freed. For the 'write' operation, the grant copy is performed
> before the qemu device invokes the write.
> 
> A new value 'feature_grant_copy' is added to recognize when the
> grant copy operation is supported by a guest.
> 
> Signed-off-by: Paulina Szubarczyk <paulinaszubarczyk@xxxxxxxxx>

Thanks, this is looking good, just a couple of minor comments below.

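To make sure I understand the intended flow: since the
qemu_aio_complete hunk is not quoted in full below, here is a rough
sketch of how I read the description, using the helpers introduced by
the patch (not the exact code):

    if (ioreq->blkdev->feature_grant_copy) {
        switch (ioreq->req.operation) {
        case BLKIF_OP_READ:
            /* The read completed into local buffers: copy them out
             * to the guest's grants (ioreq_copy bumps aio_errors on
             * failure), then free the local buffers. */
            ioreq_copy(ioreq);
            free_buffers(ioreq);
            break;
        case BLKIF_OP_WRITE:
            /* Data was already copied from the grants before the
             * write was issued; only the buffers remain to be freed. */
            free_buffers(ioreq);
            break;
        default:
            break;
        }
    }
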
> ---
> Changes since v3:
> - qemu_memalign/qemu_vfree is used instead of the functions
>   allocating memory from xc.
> - removed the get_buffer function; instead there is a direct call
>   to qemu_memalign.
> - moved ioreq_copy for the write operation to ioreq_runio_qemu_aio.
> - added struct xengnttab_grant_copy_segment_t and a stub in
>   xen_common.h for versions of Xen earlier than 480 (see the
>   sketch after this list).
> - added a check for version 480 to configure. The test repeats
>   all the operations that are required for version < 480 and
>   checks whether xengnttab_grant_copy() is implemented.
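
Since the xen_common.h hunk is not quoted in this mail, here is a
minimal sketch of what such a pre-480 compat stub could look like
(following the layout of Xen's xengnttab_grant_copy_segment_t, not
necessarily the exact code in the patch):

    typedef struct xengnttab_grant_copy_segment {
        union {
            void *virt;
            struct {
                uint32_t ref;
                uint16_t offset;
                uint16_t domid;
            } foreign;
        } source, dest;
        uint16_t len;
        uint16_t flags;
        int16_t status;
    } xengnttab_grant_copy_segment_t;

    /* Old libs have no grant copy: fail the runtime probe with ENOSYS. */
    static inline int xengnttab_grant_copy(xengnttab_handle *xgt,
                                           uint32_t count,
                                           xengnttab_grant_copy_segment_t *segs)
    {
        return -ENOSYS;
    }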
> 
> * I did not change the way of testing whether the grant_copy
>   operation is implemented. As far as I understand, if the code from
>   gnttab_unimp.c is used then the gnttab device is unavailable and
>   the handle to gntdev would be invalid. But if the handle is valid,
>   the ioctl should return "operation unimplemented" if gntdev does
>   not implement the operation.
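
In code terms that runtime probe amounts to something like the
following sketch (based on the description above rather than the
exact blk_connect hunk, which is not quoted in this mail); a
zero-segment copy succeeds only when gntdev implements the ioctl:

    blkdev->feature_grant_copy =
        (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
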
> ---
>  configure                   |  56 +++++++++++++++++
>  hw/block/xen_disk.c         | 142 ++++++++++++++++++++++++++++++++++++++++++--
>  include/hw/xen/xen_common.h |  25 ++++++++
>  3 files changed, 218 insertions(+), 5 deletions(-)
> 
> diff --git a/configure b/configure
> index f57fcc6..b5bf7d4 100755
> --- a/configure
> +++ b/configure
> @@ -1956,6 +1956,62 @@ EOF
>  /*
>   * If we have stable libs then we don't want the libxc compat
>   * layers, regardless of what CFLAGS we may have been given.
> + *
> + * Also, check if xengnttab_grant_copy_segment_t is defined and
> + * grant copy operation is implemented.
> + */
> +#undef XC_WANT_COMPAT_EVTCHN_API
> +#undef XC_WANT_COMPAT_GNTTAB_API
> +#undef XC_WANT_COMPAT_MAP_FOREIGN_API
> +#include <xenctrl.h>
> +#include <xenstore.h>
> +#include <xenevtchn.h>
> +#include <xengnttab.h>
> +#include <xenforeignmemory.h>
> +#include <stdint.h>
> +#include <xen/hvm/hvm_info_table.h>
> +#if !defined(HVM_MAX_VCPUS)
> +# error HVM_MAX_VCPUS not defined
> +#endif
> +int main(void) {
> +  xc_interface *xc = NULL;
> +  xenforeignmemory_handle *xfmem;
> +  xenevtchn_handle *xe;
> +  xengnttab_handle *xg;
> +  xen_domain_handle_t handle;
> +  xengnttab_grant_copy_segment_t* seg = NULL;
> +
> +  xs_daemon_open();
> +
> +  xc = xc_interface_open(0, 0, 0);
> +  xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
> +  xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
> +  xc_hvm_inject_msi(xc, 0, 0xf0000000, 0x00000000);
> +  xc_hvm_create_ioreq_server(xc, 0, HVM_IOREQSRV_BUFIOREQ_ATOMIC, NULL);
> +  xc_domain_create(xc, 0, handle, 0, NULL, NULL);
> +
> +  xfmem = xenforeignmemory_open(0, 0);
> +  xenforeignmemory_map(xfmem, 0, 0, 0, 0, 0);
> +
> +  xe = xenevtchn_open(0, 0);
> +  xenevtchn_fd(xe);
> +
> +  xg = xengnttab_open(0, 0);
> +  xengnttab_map_grant_ref(xg, 0, 0, 0);

I don't think you need to check xengnttab_map_grant_ref; checking
xengnttab_grant_copy alone should be enough. Since xengnttab_grant_copy was
added later, you can assume xengnttab_map_grant_ref will be there.
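
E.g. for the 480 test the gnttab part could then shrink to just the
following (an untested sketch, using the same variables as the test
program above):

    xg = xengnttab_open(0, 0);
    xengnttab_grant_copy(xg, 0, seg);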

> +  xengnttab_grant_copy(xg, 0, seg);
> +
> +  return 0;
> +}
> +EOF
> +      compile_prog "" "$xen_libs $xen_stable_libs"
> +    then
> +    xen_ctrl_version=480
> +    xen=yes
> +  elif
> +      cat > $TMPC <<EOF &&
> +/*
> + * If we have stable libs then we don't want the libxc compat
> + * layers, regardless of what CFLAGS we may have been given.
>   */
>  #undef XC_WANT_COMPAT_EVTCHN_API
>  #undef XC_WANT_COMPAT_GNTTAB_API
> diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> index 3b8ad33..2dd1464 100644
> --- a/hw/block/xen_disk.c
> +++ b/hw/block/xen_disk.c
> @@ -119,6 +119,9 @@ struct XenBlkDev {
>      unsigned int        persistent_gnt_count;
>      unsigned int        max_grants;
>  
> +    /* Grant copy */
> +    gboolean            feature_grant_copy;
> +
>      /* qemu block driver */
>      DriveInfo           *dinfo;
>      BlockBackend        *blk;
> @@ -489,6 +492,95 @@ static int ioreq_map(struct ioreq *ioreq)
>      return 0;
>  }
>  
> +static void free_buffers(struct ioreq *ioreq)
> +{
> +    int i;
> +
> +    for (i = 0; i < ioreq->v.niov; i++) {
> +        ioreq->page[i] = NULL;
> +    }
> +
> +    qemu_vfree(ioreq->pages);
> +}
> +
> +static int ioreq_init_copy_buffers(struct ioreq *ioreq)
> +{
> +    int i;
> +
> +    if (ioreq->v.niov == 0) {
> +        return 0;
> +    }
> +
> +    ioreq->pages = qemu_memalign(XC_PAGE_SIZE, ioreq->v.niov * XC_PAGE_SIZE);
> +    if (!ioreq->pages) {
> +        return -1;
> +    }
> +
> +    for (i = 0; i < ioreq->v.niov; i++) {
> +        ioreq->page[i] = ioreq->pages + i * XC_PAGE_SIZE;
> +        ioreq->v.iov[i].iov_base = ioreq->page[i];
> +    }
> +
> +    return 0;
> +}
> +
> +static int ioreq_copy(struct ioreq *ioreq)
> +{
> +    xengnttab_handle *gnt = ioreq->blkdev->xendev.gnttabdev;
> +    xengnttab_grant_copy_segment_t segs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
> +    int i, count = 0, r, rc;

Why do you need to initialize count to 0 here? A couple of lines below you
unconditionally set it to ioreq->v.niov.
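
I.e. the declaration could simply be:

    int i, count, r, rc;

with count assigned once from ioreq->v.niov below.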

> +    int64_t file_blk = ioreq->blkdev->file_blk;
> +
> +    if (ioreq->v.niov == 0) {
> +        return 0;
> +    }
> +
> +    count = ioreq->v.niov;
> +
> +    for (i = 0; i < count; i++) {
> +
> +        if (ioreq->req.operation == BLKIF_OP_READ) {
> +            segs[i].flags = GNTCOPY_dest_gref;
> +            segs[i].dest.foreign.ref = ioreq->refs[i];
> +            segs[i].dest.foreign.domid = ioreq->domids[i];
> +            segs[i].dest.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
> +            segs[i].source.virt = ioreq->v.iov[i].iov_base;
> +        } else {
> +            segs[i].flags = GNTCOPY_source_gref;
> +            segs[i].source.foreign.ref = ioreq->refs[i];
> +            segs[i].source.foreign.domid = ioreq->domids[i];
> +            segs[i].source.foreign.offset = ioreq->req.seg[i].first_sect * file_blk;
> +            segs[i].dest.virt = ioreq->v.iov[i].iov_base;
> +        }
> +        segs[i].len = (ioreq->req.seg[i].last_sect
> +                       - ioreq->req.seg[i].first_sect + 1) * file_blk;
> +
> +    }
> +
> +    rc = xengnttab_grant_copy(gnt, count, segs);
> +
> +    if (rc) {
> +        xen_be_printf(&ioreq->blkdev->xendev, 0,
> +                      "failed to copy data %d\n", rc);
> +        ioreq->aio_errors++;
> +        return -1;
> +    } else {
> +        r = 0;
> +    }

Do you really need both r and rc here? I think you could just have rc and
use it below as well.
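
Something along these lines (an untested sketch; note it also prints
segs[i].status in the per-segment message instead of the overall rc,
which is always 0 at that point):

    rc = xengnttab_grant_copy(gnt, count, segs);
    if (rc) {
        xen_be_printf(&ioreq->blkdev->xendev, 0,
                      "failed to copy data %d\n", rc);
        ioreq->aio_errors++;
        return -1;
    }

    for (i = 0; i < count; i++) {
        if (segs[i].status != GNTST_okay) {
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "failed to copy data %d for gref %d, domid %d\n",
                          segs[i].status, ioreq->refs[i], ioreq->domids[i]);
            ioreq->aio_errors++;
            rc = -1;
        }
    }

    return rc;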

> +    for (i = 0; i < count; i++) {
> +        if (segs[i].status != GNTST_okay) {
> +            xen_be_printf(&ioreq->blkdev->xendev, 3,
> +                          "failed to copy data %d for gref %d, domid %d\n", 
> rc,
> +                          ioreq->refs[i], ioreq->domids[i]);
> +            ioreq->aio_errors++;
> +            r = -1;
> +        }
> +    }
> +
> +    return r;
> +}
> +
>  static int ioreq_runio_qemu_aio(struct ioreq *ioreq);
>  
>  static void qemu_aio_complete(void *opaque, int ret)

Roger.
