
[Minios-devel] [UNIKRAFT PATCH 17/17] plat/xen/drivers/blk: Optimize using pool of grant refs for each queue



Each read / write request needs a number of grant references. To avoid
allocating new grant refs at the beginning of every operation and freeing
them at the end of it, we reuse grant references kept in a per-queue pool.
If the pool does not contain enough grant refs for a request, additional
ones are allocated, and they are freed when the response is processed.

Signed-off-by: Roxana Nicolescu <nicolescu.roxana1996@xxxxxxxxx>
---
 plat/xen/Config.uk              |  21 +++++-
 plat/xen/drivers/blk/blkfront.c | 141 ++++++++++++++++++++++++++++++++++++++++
 plat/xen/drivers/blk/blkfront.h |  35 ++++++++++
 3 files changed, 196 insertions(+), 1 deletion(-)
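
A short standalone sketch of the idea for reviewers (not part of the commit
itself): gref_elem and gref_pool below are hypothetical, simplified stand-ins
for blkfront_gref and blkfront_grefs_pool; the Xen grant-table handling
(gnttab_grant_access() / gnttab_update_grant()) and the pool semaphore are
left out.

#include <stdbool.h>
#include <stdlib.h>

struct gref_elem {
        struct gref_elem *next;
        bool reusable;          /* true: owned by the pool; false: per-request */
};

struct gref_pool {
        struct gref_elem *head; /* singly-linked list of reusable elements */
};

/* Fill the pool with 'count' reusable elements at queue setup time. */
static int pool_setup(struct gref_pool *pool, int count)
{
        pool->head = NULL;
        for (int i = 0; i < count; ++i) {
                struct gref_elem *e = malloc(sizeof(*e));

                if (!e)
                        return -1;      /* cleanup elided in this sketch */
                e->reusable = true;
                e->next = pool->head;
                pool->head = e;
        }
        return 0;
}

/* Take up to 'needed' elements from the pool; allocate the remainder. */
static int request_set_grefs(struct gref_pool *pool, struct gref_elem **out,
                             int needed)
{
        int i = 0;

        for (; i < needed && pool->head; ++i) {
                out[i] = pool->head;
                pool->head = pool->head->next;
        }
        for (; i < needed; ++i) {
                out[i] = malloc(sizeof(**out));
                if (!out[i])
                        return -1;      /* cleanup elided in this sketch */
                out[i]->reusable = false;
        }
        return 0;
}

/* When the response is processed: pool elements go back, extras are freed. */
static void request_reset_grefs(struct gref_pool *pool, struct gref_elem **in,
                                int count)
{
        for (int i = 0; i < count; ++i) {
                if (in[i]->reusable) {
                        in[i]->next = pool->head;
                        pool->head = in[i];
                } else {
                        free(in[i]);
                }
        }
}

int main(void)
{
        struct gref_pool pool;
        struct gref_elem *req[11];      /* e.g. a request with 11 segments */

        pool_setup(&pool, 8);           /* pool smaller than the request */
        if (request_set_grefs(&pool, req, 11) == 0) {
                /* ... grant / update refs, submit, handle the response ... */
                request_reset_grefs(&pool, req, 11);
        }
        return 0;
}

A request with more segments than the pool currently holds simply gets the
extra elements allocated in request_set_grefs() and freed again in
request_reset_grefs(); the pool never grows beyond its initial size.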

diff --git a/plat/xen/Config.uk b/plat/xen/Config.uk
index e6d9132c..719a271b 100644
--- a/plat/xen/Config.uk
+++ b/plat/xen/Config.uk
@@ -73,11 +73,30 @@ menu "Xenbus Drivers"
         depends on XEN_XENBUS
         depends on XEN_GNTTAB
 
-config XEN_BLKFRONT
+menuconfig XEN_BLKFRONT
        bool "Xenbus Blkfront Driver"
        default n
        depends on LIBUKBLKDEV
        help
                Driver for block devices
+
+        if XEN_BLKFRONT
+                config XEN_BLKFRONT_GREFPOOL
+                        bool "Enable grant reference pool for each queue"
+                        default y
+                        select LIBUKSCHED
+                        select LIBUKLOCK
+                        select LIBUKLOCK_SEMAPHORE
+                        help
+                                Each read / write request needs a number of
+                                grant references. To avoid allocating grant
+                                refs at the beginning of every operation and
+                                freeing them at the end of it, we reuse the
+                                grant references kept in a per-queue pool.
+                                If, when a request is sent, there are not
+                                enough grant refs in the pool, new ones are
+                                allocated and then freed when the response
+                                is processed.
+        endif
 endmenu
 endif
diff --git a/plat/xen/drivers/blk/blkfront.c b/plat/xen/drivers/blk/blkfront.c
index 23189156..3cbe3570 100644
--- a/plat/xen/drivers/blk/blkfront.c
+++ b/plat/xen/drivers/blk/blkfront.c
@@ -66,6 +66,8 @@
 
 static struct uk_alloc *drv_allocator;
 
+/* Get gref elements from the queue's pool, or allocate new ones.
+ */
 static int blkfront_request_set_grefs(struct blkfront_request *blkfront_req)
 {
        struct blkfront_gref *ref_elem;
@@ -73,9 +75,28 @@ static int blkfront_request_set_grefs(struct blkfront_request *blkfront_req)
        int grefi = 0, grefj;
        int err = 0;
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       struct uk_blkdev_queue *queue;
+       struct blkfront_grefs_pool *grefs_pool;
+       int rc = 0;
+#endif
+
        UK_ASSERT(blkfront_req != NULL);
        nb_segments = blkfront_req->nb_segments;
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       queue = blkfront_req->queue;
+       grefs_pool = &queue->ref_pool;
+       uk_semaphore_down(&grefs_pool->sem);
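+       /* Pop up to nb_segments reusable gref elements from the pool. */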
+       for (grefi = 0; grefi < nb_segments &&
+               !UK_STAILQ_EMPTY(&grefs_pool->grefs_list); ++grefi) {
+               ref_elem = UK_STAILQ_FIRST(&grefs_pool->grefs_list);
+               UK_STAILQ_REMOVE_HEAD(&grefs_pool->grefs_list, _list);
+               blkfront_req->gref[grefi] = ref_elem;
+       }
+
+       uk_semaphore_up(&grefs_pool->sem);
+#endif
        /* we allocate new ones */
        for (; grefi < nb_segments; ++grefi) {
                ref_elem = uk_malloc(drv_allocator, sizeof(*ref_elem));
@@ -84,6 +105,9 @@ static int blkfront_request_set_grefs(struct blkfront_request *blkfront_req)
                        goto err;
                }
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+               ref_elem->reusable_gref = false;
+#endif
                blkfront_req->gref[grefi] = ref_elem;
        }
 
@@ -93,21 +117,52 @@ err:
        /* Free all the elements from 0 index to where the error happens */
        for (grefj = 0; grefj < grefi; ++grefj) {
                ref_elem = blkfront_req->gref[grefj];
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+               if (ref_elem->reusable_gref) {
+                       rc = gnttab_end_access(ref_elem->ref);
+                       UK_ASSERT(rc);
+               }
+#endif
                uk_free(drv_allocator, ref_elem);
        }
        goto out;
 }
 
+/* The first gref_elems in blkfront_request were popped from the pool;
+ * all of these elements have the reusable_gref flag set.
+ * We keep returning elements from blkfront_request to the pool of
+ * grant refs until we encounter an element with the reusable flag unset.
+ **/
 static void blkfront_request_reset_grefs(struct blkfront_request *req)
 {
        uint16_t gref_id = 0;
        struct blkfront_gref *gref_elem;
        uint16_t nb_segments;
        int rc;
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       struct uk_blkdev_queue *queue;
+       struct blkfront_grefs_pool *grefs_pool;
+#endif
 
        UK_ASSERT(req);
        nb_segments = req->nb_segments;
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       queue = req->queue;
+       grefs_pool = &queue->ref_pool;
+       uk_semaphore_down(&grefs_pool->sem);
+       for (; gref_id < nb_segments; ++gref_id) {
+               gref_elem = req->gref[gref_id];
+               if (!gref_elem->reusable_gref)
+                       break;
+
+               UK_STAILQ_INSERT_TAIL(&grefs_pool->grefs_list,
+                       gref_elem,
+                       _list);
+       }
+
+       uk_semaphore_up(&grefs_pool->sem);
+#endif
        for (; gref_id < nb_segments; ++gref_id) {
                gref_elem = req->gref[gref_id];
                if (gref_elem->ref != GRANT_INVALID_REF) {
@@ -119,6 +174,11 @@ static void blkfront_request_reset_grefs(struct blkfront_request *req)
        }
 }
 
+/* This function re-points the grant references taken from the pool at the
+ * data buffers of the current request.
+ * For gref elements that were freshly allocated, new grant refs are
+ * acquired instead.
+ **/
 static void blkfront_request_map_grefs(struct blkif_request *ring_req,
                domid_t otherend_id)
 {
@@ -129,6 +189,9 @@ static void blkfront_request_map_grefs(struct blkif_request *ring_req,
        uintptr_t data;
        uintptr_t start_sector;
        struct blkfront_gref *ref_elem;
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       int rc;
+#endif
 
        UK_ASSERT(ring_req);
 
@@ -140,6 +203,14 @@ static void blkfront_request_map_grefs(struct blkif_request *ring_req,
        for (gref_index = 0; gref_index < nb_segments; ++gref_index) {
                data = start_sector + gref_index * PAGE_SIZE;
                ref_elem = blkfront_req->gref[gref_index];
+
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+               if (ref_elem->reusable_gref) {
+                       rc = gnttab_update_grant(ref_elem->ref, otherend_id,
+                               virtual_to_mfn(data), ring_req->operation);
+                       UK_ASSERT(rc);
+               } else
+#endif
                ref_elem->ref = gnttab_grant_access(otherend_id,
                                virtual_to_mfn(data), ring_req->operation);
 
@@ -229,6 +300,7 @@ static int blkfront_request_write(struct blkfront_request *blkfront_req,
        blkfront_ring_wr_init(ring_req, sector_size);
        blkfront_req->nb_segments = ring_req->nr_segments;
 
+       /* Get blkfront_grefs from pool or allocate new ones */
        rc = blkfront_request_set_grefs(blkfront_req);
        if (rc)
                goto out;
@@ -542,6 +614,63 @@ static void blkfront_ring_fini(struct uk_blkdev_queue *queue)
                uk_free_page(queue->a, queue->ring.sring);
 }
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+static void blkfront_queue_gref_pool_release(struct uk_blkdev_queue *queue)
+{
+       struct blkfront_grefs_pool *grefs_pool;
+       struct blkfront_gref *ref_elem;
+       int rc;
+
+       UK_ASSERT(queue);
+       grefs_pool = &queue->ref_pool;
+
+       while (!UK_STAILQ_EMPTY(&grefs_pool->grefs_list)) {
+               ref_elem = UK_STAILQ_FIRST(&grefs_pool->grefs_list);
+               if (ref_elem->ref != GRANT_INVALID_REF) {
+                       rc = gnttab_end_access(ref_elem->ref);
+                       UK_ASSERT(rc);
+               }
+
+               UK_STAILQ_REMOVE_HEAD(&grefs_pool->grefs_list, _list);
+               uk_free(queue->a, ref_elem);
+       }
+}
+
+static int blkfront_queue_gref_pool_setup(struct uk_blkdev_queue *queue)
+{
+       int ref_idx;
+       struct blkfront_gref *gref_elem;
+       struct blkfront_dev *dev;
+       int rc = 0;
+
+       UK_ASSERT(queue);
+       dev = queue->dev;
+       uk_semaphore_init(&queue->ref_pool.sem, 1);
+       UK_STAILQ_INIT(&queue->ref_pool.grefs_list);
+
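+       /* Pre-allocate BLKIF_MAX_SEGMENTS_PER_REQUEST reusable gref elements
+        * for this queue.
+        */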
+       for (ref_idx = 0; ref_idx < BLKIF_MAX_SEGMENTS_PER_REQUEST; ++ref_idx) {
+               gref_elem = uk_malloc(queue->a, sizeof(*gref_elem));
+               if (!gref_elem) {
+                       rc = -ENOMEM;
+                       goto err;
+               }
+
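+               /* Grant a dummy reference up front; it is re-pointed at the
+                * request data via gnttab_update_grant() when it is used.
+                */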
+               gref_elem->ref = gnttab_grant_access(dev->xendev->otherend_id,
+                               0, 1);
+               UK_ASSERT(gref_elem->ref != GRANT_INVALID_REF);
+               gref_elem->reusable_gref = true;
+               UK_STAILQ_INSERT_TAIL(&queue->ref_pool.grefs_list, gref_elem,
+                               _list);
+       }
+
+out:
+       return rc;
+err:
+       blkfront_queue_gref_pool_release(queue);
+       goto out;
+}
+#endif
+
 /* Handler for event channel notifications */
 static void blkfront_handler(evtchn_port_t port __unused,
                struct __regs *regs __unused, void *arg)
@@ -595,6 +724,12 @@ static struct uk_blkdev_queue *blkfront_queue_setup(struct uk_blkdev *blkdev,
                goto err_out;
        }
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       err = blkfront_queue_gref_pool_setup(queue);
+       if (err)
+               goto err_out;
+#endif
+
        return queue;
 
 err_out:
@@ -622,6 +757,12 @@ static int blkfront_queue_release(struct uk_blkdev *blkdev,
        unbind_evtchn(queue->evtchn);
        blkfront_ring_fini(queue);
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       blkfront_queue_gref_pool_release(queue);
+#endif
+
+       return 0;
+}
 
 static int blkfront_queue_intr_enable(struct uk_blkdev *blkdev,
                                             uint16_t queue_id)
diff --git a/plat/xen/drivers/blk/blkfront.h b/plat/xen/drivers/blk/blkfront.h
index b100ca2f..16ed5817 100644
--- a/plat/xen/drivers/blk/blkfront.h
+++ b/plat/xen/drivers/blk/blkfront.h
@@ -42,11 +42,33 @@
  * implementation.
  */
 #include <uk/blkdev.h>
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+#include <uk/list.h>
+#include <uk/semaphore.h>
+#include <stdbool.h>
+#endif
 
 #include <xen/io/blkif.h>
 #include <common/gnttab.h>
 #include <common/events.h>
 
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+/**
+ * Structure used to describe a list of blkfront_gref elements.
+ */
+UK_STAILQ_HEAD(blkfront_gref_list, struct blkfront_gref);
+
+/*
+ * Structure used to describe a pool of grant refs for each queue.
+ * It holds at most BLKIF_MAX_SEGMENTS_PER_REQUEST elements.
+ **/
+struct blkfront_grefs_pool {
+       /* List of grefs. */
+       struct blkfront_gref_list grefs_list;
+       /* Semaphore for synchronization. */
+       struct uk_semaphore sem;
+};
+#endif
 
 /**
  * Structure used to describe a grant ref element.
@@ -54,6 +76,14 @@
 struct blkfront_gref {
        /* Grant ref number. */
        grant_ref_t ref;
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       /* Entry for pool. */
+       UK_STAILQ_ENTRY(struct blkfront_gref) _list;
+       /* True if this element was taken from the pool.
+        * Otherwise it was allocated while building the request.
+        **/
+       bool reusable_gref;
+#endif
 };
 
 /**
@@ -88,6 +118,11 @@ struct uk_blkdev_queue {
        int intr_enabled;
        /* Reference to the Blkfront Device */
        struct blkfront_dev *dev;
+
+#if CONFIG_XEN_BLKFRONT_GREFPOOL
+       /* Grant refs pool. */
+       struct blkfront_grefs_pool ref_pool;
+#endif
 };
 
 /**
-- 
2.11.0

