
Re: [Xen-devel] [PATCH for-xen-4.5] xen_disk: fix unmapping of persistent grants



On Wed, Nov 12, 2014 at 03:55:51PM +0000, George Dunlap wrote:
> On 11/12/2014 03:45 PM, Roger Pau Monne wrote:
> >This patch fixes two issues with persistent grants and the disk PV backend
> >(Qdisk):
> >
> >  - Don't use batch mappings when using persistent grants, doing so prevents
> >    unmapping single grants (the whole area has to be unmapped at once).
> >  - Unmap persistent grants before switching to the closed state, so the
> >    frontend can also free them.
> >
> >Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
> >Reported-and-Tested-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> >Cc: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
> >Cc: Kevin Wolf <kwolf@xxxxxxxxxx>
> >Cc: Stefan Hajnoczi <stefanha@xxxxxxxxxx>
> >Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> 
> CC'ing Konrad and Stefano: This fixes a critical bug that should be a
> blocker for the Xen 4.5 release.  Without this, any backend using qdisk for
> a PV guest with pygrub (including qcow2 and vhd) will crash dom0.

Changing the title to reflect that.

Stefano?
> 
>  -George
> 
> >---
> >  hw/block/xen_disk.c | 35 ++++++++++++++++++++++++-----------
> >  1 file changed, 24 insertions(+), 11 deletions(-)
> >
> >diff --git a/hw/block/xen_disk.c b/hw/block/xen_disk.c
> >index 231e9a7..1300c0a 100644
> >--- a/hw/block/xen_disk.c
> >+++ b/hw/block/xen_disk.c
> >@@ -43,8 +43,6 @@
> >  /* ------------------------------------------------------------- */
> >-static int batch_maps   = 0;
> >-
> >  static int max_requests = 32;
> >  /* ------------------------------------------------------------- */
> >@@ -105,6 +103,7 @@ struct XenBlkDev {
> >      blkif_back_rings_t  rings;
> >      int                 more_work;
> >      int                 cnt_map;
> >+    bool                batch_maps;
> >      /* request lists */
> >      QLIST_HEAD(inflight_head, ioreq) inflight;
> >@@ -309,7 +308,7 @@ static void ioreq_unmap(struct ioreq *ioreq)
> >      if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
> >          return;
> >      }
> >-    if (batch_maps) {
> >+    if (ioreq->blkdev->batch_maps) {
> >          if (!ioreq->pages) {
> >              return;
> >          }
> >@@ -386,7 +385,7 @@ static int ioreq_map(struct ioreq *ioreq)
> >          new_maps = ioreq->v.niov;
> >      }
> >-    if (batch_maps && new_maps) {
> >+    if (ioreq->blkdev->batch_maps && new_maps) {
> >          ioreq->pages = xc_gnttab_map_grant_refs
> >              (gnt, new_maps, domids, refs, ioreq->prot);
> >          if (ioreq->pages == NULL) {
> >@@ -433,7 +432,7 @@ static int ioreq_map(struct ioreq *ioreq)
> >               */
> >              grant = g_malloc0(sizeof(*grant));
> >              new_maps--;
> >-            if (batch_maps) {
> >+            if (ioreq->blkdev->batch_maps) {
> >                  grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
> >              } else {
> >                  grant->page = ioreq->page[new_maps];
> >@@ -718,7 +717,9 @@ static void blk_alloc(struct XenDevice *xendev)
> >      QLIST_INIT(&blkdev->freelist);
> >      blkdev->bh = qemu_bh_new(blk_bh, blkdev);
> >      if (xen_mode != XEN_EMULATE) {
> >-        batch_maps = 1;
> >+        blkdev->batch_maps = true;
> >+    } else {
> >+        blkdev->batch_maps = false;
> >      }
> >      if (xc_gnttab_set_max_grants(xendev->gnttabdev,
> >              MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
> >@@ -923,6 +924,13 @@ static int blk_connect(struct XenDevice *xendev)
> >      } else {
> >          blkdev->feature_persistent = !!pers;
> >      }
> >+    if (blkdev->feature_persistent) {
> >+        /*
> >+         * Disable batch maps, since that would prevent unmapping
> >+         * single persistent grants.
> >+         */
> >+        blkdev->batch_maps = false;
> >+    }
> >      blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
> >      if (blkdev->xendev.protocol) {
> >@@ -1000,6 +1008,16 @@ static void blk_disconnect(struct XenDevice *xendev)
> >          blkdev->cnt_map--;
> >          blkdev->sring = NULL;
> >      }
> >+
> >+    /*
> >+     * Unmap persistent grants before switching to the closed state
> >+     * so the frontend can free them.
> >+     */
> >+    if (blkdev->feature_persistent) {
> >+        g_tree_destroy(blkdev->persistent_gnts);
> >+        assert(blkdev->persistent_gnt_count == 0);
> >+        blkdev->feature_persistent = FALSE;
> >+    }
> >  }
> >  static int blk_free(struct XenDevice *xendev)
> >@@ -1011,11 +1029,6 @@ static int blk_free(struct XenDevice *xendev)
> >          blk_disconnect(xendev);
> >      }
> >-    /* Free persistent grants */
> >-    if (blkdev->feature_persistent) {
> >-        g_tree_destroy(blkdev->persistent_gnts);
> >-    }
> >-
> >      while (!QLIST_EMPTY(&blkdev->freelist)) {
> >          ioreq = QLIST_FIRST(&blkdev->freelist);
> >          QLIST_REMOVE(ioreq, list);
> 
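
For context, here is a minimal sketch (not part of the patch; helper names are
made up and error handling is omitted) of the libxc interface behind the first
fix: xc_gnttab_map_grant_refs() returns one contiguous area that a later
xc_gnttab_munmap() can only release as a whole, while xc_gnttab_map_grant_ref()
maps a single page that can be released on its own, which is what a persistent
grant cache needs.

#include <sys/mman.h>   /* PROT_READ / PROT_WRITE */
#include <xenctrl.h>    /* xc_gnttab_* calls used by xen_disk.c */

/* Batch map: one contiguous area covering 'count' grants.  A later
 * xc_gnttab_munmap(xcg, area, count) tears down every page at once,
 * so individual grants cannot be kept mapped. */
void *map_batch(xc_gnttab *xcg, uint32_t *domids, uint32_t *refs,
                uint32_t count)
{
    return xc_gnttab_map_grant_refs(xcg, count, domids, refs,
                                    PROT_READ | PROT_WRITE);
}

/* Single map: each page is its own mapping, and
 * xc_gnttab_munmap(xcg, page, 1) releases just that one grant. */
void *map_single(xc_gnttab *xcg, uint32_t domid, uint32_t ref)
{
    return xc_gnttab_map_grant_ref(xcg, domid, ref, PROT_READ | PROT_WRITE);
}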

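Similarly, a sketch (illustrative only; the struct and function names below are
made up, not QEMU identifiers) of why the g_tree_destroy() call added to
blk_disconnect() is enough to unmap the cached grants: the tree is created with
a value-destroy callback, so destroying it runs that callback once for every
cached grant.

#include <glib.h>
#include <xenctrl.h>

/* One cached persistent grant; xen_disk.c keeps equivalent per-grant state. */
typedef struct {
    xc_gnttab *xcg;   /* handle the page was mapped with */
    void      *page;  /* address returned by xc_gnttab_map_grant_ref() */
} grant_entry;

/* Order tree keys (grant references stored via GUINT_TO_POINTER()). */
static gint ref_cmp(gconstpointer a, gconstpointer b, gpointer data)
{
    guint ka = GPOINTER_TO_UINT(a), kb = GPOINTER_TO_UINT(b);
    return (ka > kb) - (ka < kb);
}

/* Value-destroy callback: g_tree_destroy() invokes this for every entry,
 * so tearing the tree down unmaps each persistent grant individually. */
static void unmap_entry(gpointer data)
{
    grant_entry *e = data;
    xc_gnttab_munmap(e->xcg, e->page, 1);
    g_free(e);
}

GTree *grant_cache_new(void)
{
    return g_tree_new_full(ref_cmp, NULL, NULL, unmap_entry);
}

Because the unmapping happens inside g_tree_destroy(), calling it from
blk_disconnect(), before the backend switches to the closed state, means the
grants are already released by the time the frontend sees the state change and
frees them on its side, which is what the second bullet of the commit message
describes.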