
Re: [Xen-devel] [PATCH 12/14] xen-blkback: safely unmap grants in case they are still in use




+static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
+{
+       struct pending_req *pending_req = (struct pending_req *)(data->data);
+       struct xen_blkif *blkif = pending_req->blkif;
+
+       /*
+        * BUG_ON used to reproduce existing behaviour, but is this
+        * the best way to deal with this?
+        */
+       BUG_ON(result);
+       /* free page after unmap */
+       put_free_pages(blkif, data->pages, data->count);
+
+       /* respond */
+       make_response(blkif, pending_req->id,
+                     pending_req->operation, pending_req->status);
+       free_req(blkif, pending_req);
+       /*
+        * Make sure the request is freed before releasing blkif,
+        * or there could be a race between free_req and the
+        * cleanup done in xen_blkif_free during shutdown.
+        *
+        * NB: The fact that we might try to wake up pending_free_wq
+        * before drain_complete (in case there's a drain going on)
+        * it's not a problem with our current implementation
+        * because we can assure there's no thread waiting on
+        * pending_free_wq if there's a drain going on, but it has
+        * to be taken into account if the current model is changed.
+        */
+       if (atomic_dec_and_test(&blkif->inflight) &&
+           atomic_read(&blkif->drain)) {
+               complete(&blkif->drain_complete);
+       }
+       xen_blkif_put(blkif);
+}
+
+static void xen_blkbk_unmap_populate(struct pending_req *req)
+{
+       struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
+       struct xen_blkif *blkif = req->blkif;
+       struct grant_page **pages = req->segments;
+       unsigned int i, invcount = 0;
+
+       for (i = 0; i < req->nr_pages; i++) {
+               if (pages[i]->persistent_gnt != NULL) {
+                       put_persistent_gnt(blkif, pages[i]->persistent_gnt);
+                       continue;
+               }
+               if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
+                       continue;
+
+               req->unmap_pages[invcount] = pages[i]->page;
+               gnttab_set_unmap_op(&req->unmap[invcount],
+                                   vaddr(pages[i]->page),
+                                   GNTMAP_host_map, pages[i]->handle);
+               pages[i]->handle = BLKBACK_INVALID_HANDLE;
+               invcount++;
+       }
+
+       work->data = req;
+       work->done = &xen_blkbk_unmap_and_respond_callback;
+       work->unmap_ops = req->unmap;
+       work->kunmap_ops = NULL;
+       work->pages = req->unmap_pages;
+       work->count = invcount;
+}
+
+static void xen_blkbk_unmap_and_respond(struct pending_req *req)
+{
+       xen_blkbk_unmap_populate(req);
+       gnttab_unmap_refs_async(&req->gnttab_unmap_data);
+}
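
For context on the NB in the callback above: the drain logic it refers to is the dispatcher-side wait loop, which this same series switches over to the new inflight counter. Roughly, it looks like this (a sketch of the corresponding blkback code, not quoted in this hunk, so details may differ):

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                if (atomic_read(&blkif->inflight) == 0)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

Because the dispatcher only sleeps on drain_complete while drain is set, and no thread waits on pending_free_wq during a drain, the wake-up ordering caveat in the NB holds with the current model.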

(One more for this patch)

If you are using the async unmap here, I thought the new interface requires you to wait for completion (which should be signalled from xen_blkbk_unmap_and_respond_callback()).

-boris
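
To make that point concrete, a synchronous wrapper over the async interface could look something like this (a hypothetical sketch: unmap_sync_ctx, unmap_sync_done and unmap_refs_sync are made-up names, and only the gntab_unmap_queue_data fields already used in the quoted code are assumed; the caller fills in unmap_ops, pages and count beforehand):

struct unmap_sync_ctx {
        struct completion done;
        int result;
};

static void unmap_sync_done(int result, struct gntab_unmap_queue_data *data)
{
        struct unmap_sync_ctx *ctx = data->data;

        ctx->result = result;
        complete(&ctx->done);
}

static int unmap_refs_sync(struct gntab_unmap_queue_data *work)
{
        struct unmap_sync_ctx ctx;

        init_completion(&ctx.done);
        work->data = &ctx;
        work->done = unmap_sync_done;

        gnttab_unmap_refs_async(work);
        wait_for_completion(&ctx.done);

        return ctx.result;
}

The patch instead keeps the unmap fully asynchronous and issues the response and the reference drop from the callback itself, which is why no explicit wait appears above.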

  /*
   * Unmap the grant references, and also remove the M2P over-rides
   * used in the 'pending_req'.
@@ -982,6 +1075,9 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
-       if (atomic_dec_and_test(&pending_req->pendcnt)) {
-               struct xen_blkif *blkif = pending_req->blkif;
+       if (atomic_dec_and_test(&pending_req->pendcnt))
+               xen_blkbk_unmap_and_respond(pending_req);
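
One thing this hunk relies on but does not show: the references the callback drops have to be taken on the submission side. A minimal sketch of that pairing, using xen_blkif_get() as the counterpart of the quoted xen_blkif_put() (queue_request and submit_bios are placeholders for the real dispatch path):

static void queue_request(struct xen_blkif *blkif,
                          struct pending_req *pending_req)
{
        /* Dropped by xen_blkif_put() in the unmap callback. */
        xen_blkif_get(blkif);
        /* Dropped by atomic_dec_and_test(&blkif->inflight) there. */
        atomic_inc(&blkif->inflight);

        submit_bios(pending_req);       /* placeholder for the real dispatch */
}

This way the blkif (and its drain accounting) stays alive until the last in-flight request has finished its asynchronous unmap.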

