
[Xen-devel] [PATCH 1/2] xen: add a global indicator for grant copy being available



The Xen qdisk backend needs to test whether the grant copy operation is
available in the kernel. Unfortunately, this collides with using
xengnttab_set_max_grants() on some kernels, as that operation has to
be the first one performed after opening the gnttab device.

To solve this problem, test for the availability of grant copy in
xen_be_init(), opening the gnttab device just for that purpose and
closing it again afterwards. Advertise the availability via a global
flag and use that flag in the qdisk backend.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
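Note for reviewers (not part of the commit message): below is a minimal
standalone sketch of the probe this patch adds to xen_be_init(), for
trying the check outside QEMU. It assumes the libxengnttab development
headers are installed; the zero-segment xengnttab_grant_copy() call is
the same one used in the patch and succeeds only when the kernel
implements the grant copy ioctl.

    #include <stdbool.h>
    #include <stdio.h>
    #include <xengnttab.h>

    /* Probe grant copy availability the way xen_be_init() does: open a
     * dedicated gnttab handle, issue a zero-segment copy, and close the
     * handle again so later users can still make
     * xengnttab_set_max_grants() their first operation on their own
     * handle.
     */
    static bool probe_grant_copy(void)
    {
        xengnttab_handle *xgt = xengnttab_open(NULL, 0);
        bool available = false;

        if (xgt) {
            /* A copy of zero segments succeeds iff the op exists. */
            available = (xengnttab_grant_copy(xgt, 0, NULL) == 0);
            xengnttab_close(xgt);
        }
        return available;
    }

    int main(void)
    {
        printf("grant copy %s\n",
               probe_grant_copy() ? "available" : "unavailable");
        return 0;
    }
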
 hw/block/xen_disk.c          | 18 ++++++------------
 hw/xen/xen_backend.c         | 11 +++++++++++
 include/hw/xen/xen_backend.h |  1 +
 3 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
index d42ed7070d..6632746250 100644
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -121,9 +121,6 @@ struct XenBlkDev {
     unsigned int        persistent_gnt_count;
     unsigned int        max_grants;
 
-    /* Grant copy */
-    gboolean            feature_grant_copy;
-
     /* qemu block driver */
     DriveInfo           *dinfo;
     BlockBackend        *blk;
@@ -616,7 +613,7 @@ static void qemu_aio_complete(void *opaque, int ret)
         return;
     }
 
-    if (ioreq->blkdev->feature_grant_copy) {
+    if (xen_feature_grant_copy) {
         switch (ioreq->req.operation) {
         case BLKIF_OP_READ:
             /* in case of failure ioreq->aio_errors is increased */
@@ -638,7 +635,7 @@ static void qemu_aio_complete(void *opaque, int ret)
     }
 
     ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
-    if (!ioreq->blkdev->feature_grant_copy) {
+    if (!xen_feature_grant_copy) {
         ioreq_unmap(ioreq);
     }
     ioreq_finish(ioreq);
@@ -698,7 +695,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
 {
     struct XenBlkDev *blkdev = ioreq->blkdev;
 
-    if (ioreq->blkdev->feature_grant_copy) {
+    if (xen_feature_grant_copy) {
         ioreq_init_copy_buffers(ioreq);
         if (ioreq->req.nr_segments && (ioreq->req.operation == BLKIF_OP_WRITE ||
             ioreq->req.operation == BLKIF_OP_FLUSH_DISKCACHE) &&
@@ -750,7 +747,7 @@ static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
     }
     default:
         /* unknown operation (shouldn't happen -- parse catches this) */
-        if (!ioreq->blkdev->feature_grant_copy) {
+        if (!xen_feature_grant_copy) {
             ioreq_unmap(ioreq);
         }
         goto err;
@@ -1010,18 +1007,15 @@ static int blk_init(struct XenDevice *xendev)
 
     blkdev->file_blk  = BLOCK_SIZE;
 
-    blkdev->feature_grant_copy =
-                (xengnttab_grant_copy(blkdev->xendev.gnttabdev, 0, NULL) == 0);
-
     xen_pv_printf(&blkdev->xendev, 3, "grant copy operation %s\n",
-                  blkdev->feature_grant_copy ? "enabled" : "disabled");
+                  xen_feature_grant_copy ? "enabled" : "disabled");
 
     /* fill info
      * blk_connect supplies sector-size and sectors
      */
     xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
     xenstore_write_be_int(&blkdev->xendev, "feature-persistent",
-                          !blkdev->feature_grant_copy);
+                          !xen_feature_grant_copy);
     xenstore_write_be_int(&blkdev->xendev, "info", info);
 
     xenstore_write_be_int(&blkdev->xendev, "max-ring-page-order",
diff --git a/hw/xen/xen_backend.c b/hw/xen/xen_backend.c
index c46cbb0759..00210627a9 100644
--- a/hw/xen/xen_backend.c
+++ b/hw/xen/xen_backend.c
@@ -44,6 +44,7 @@ BusState *xen_sysbus;
 /* public */
 struct xs_handle *xenstore = NULL;
 const char *xen_protocol;
+gboolean xen_feature_grant_copy;
 
 /* private */
 static int debug;
@@ -519,6 +520,8 @@ void xenstore_update_fe(char *watch, struct XenDevice *xendev)
 
 int xen_be_init(void)
 {
+    xengnttab_handle *gnttabdev;
+
     xenstore = xs_daemon_open();
     if (!xenstore) {
         xen_pv_printf(NULL, 0, "can't connect to xenstored\n");
@@ -532,6 +535,14 @@ int xen_be_init(void)
         goto err;
     }
 
+    gnttabdev = xengnttab_open(NULL, 0);
+    if (gnttabdev != NULL) {
+        if (xengnttab_grant_copy(gnttabdev, 0, NULL) == 0) {
+            xen_feature_grant_copy = true;
+        }
+        xengnttab_close(gnttabdev);
+    }
+
     xen_sysdev = qdev_create(NULL, TYPE_XENSYSDEV);
     qdev_init_nofail(xen_sysdev);
     xen_sysbus = qbus_create(TYPE_XENSYSBUS, DEVICE(xen_sysdev), "xen-sysbus");
diff --git a/include/hw/xen/xen_backend.h b/include/hw/xen/xen_backend.h
index 8a6fbcbe20..08a054f524 100644
--- a/include/hw/xen/xen_backend.h
+++ b/include/hw/xen/xen_backend.h
@@ -16,6 +16,7 @@
 /* variables */
 extern struct xs_handle *xenstore;
 extern const char *xen_protocol;
+extern gboolean xen_feature_grant_copy;
 extern DeviceState *xen_sysdev;
 extern BusState *xen_sysbus;
 
-- 
2.12.3



 

