
Re: [Xen-devel] [PATCH v4 02/10] xen/blkfront: separate per ring information out of device info



On Mon, Nov 02, 2015 at 12:21:38PM +0800, Bob Liu wrote:
> Split the per-ring information out into a new structure, "blkfront_ring_info".
> 
> A ring is the representation of a hardware queue; every vbd device can
> associate with one or more rings, depending on how many hardware
> queues/rings are to be used.

This, along with the rest of the series, builds fine with kbuild and looks good.

However, you will need to rebase it on top of v4.4-rc1 to deal with
"block/xen-blkfront: Make it running on 64KB page granularity".
> 
> This patch is preparation for supporting real multiple hardware queues/rings.
> 
> Signed-off-by: Arianna Avanzini <avanzini.arianna@xxxxxxxxx>
> Signed-off-by: Bob Liu <bob.liu@xxxxxxxxxx>
> ---
>  drivers/block/xen-blkfront.c | 321 ++++++++++++++++++++++++-------------------
>  1 file changed, 178 insertions(+), 143 deletions(-)
> 
> diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
> index a69c02d..2a557e4 100644
> --- a/drivers/block/xen-blkfront.c
> +++ b/drivers/block/xen-blkfront.c
> @@ -115,6 +115,23 @@ MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the
>  #define RINGREF_NAME_LEN (20)
>  
>  /*
> + *  Per-ring info.
> + *  Every blkfront device can associate with one or more blkfront_ring_info,
> + *  depending on how many hardware queues/rings to be used.
> + */
> +struct blkfront_ring_info {
> +     struct blkif_front_ring ring;
> +     unsigned int ring_ref[XENBUS_MAX_RING_PAGES];
> +     unsigned int evtchn, irq;
> +     struct work_struct work;
> +     struct gnttab_free_callback callback;
> +     struct blk_shadow shadow[BLK_MAX_RING_SIZE];
> +     struct list_head indirect_pages;
> +     unsigned long shadow_free;
> +     struct blkfront_info *dev_info;
> +};
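
The split itself looks sensible: everything that is genuinely per-ring
(shared ring, ring refs, event channel/irq, restart work, grant callback,
shadow state and the indirect-page pool) moves into the new structure,
while the persistent-grant list and the feature flags stay device-wide.
Just to make sure we agree on where this is heading once real multi-queue
support lands: I would expect the single embedded instance to become an
array sized by the number of negotiated queues, roughly like the sketch
below (nr_rings and the array layout are illustrative assumptions, not
part of this patch):

struct blkfront_info_sketch {
        /* ... existing device-wide fields ... */
        unsigned int nr_rings;                  /* hypothetical: negotiated with backend */
        struct blkfront_ring_info *rinfo;       /* hypothetical: array of nr_rings entries */
};
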
> +
> +/*
>   * We have one of these per vbd, whether ide, scsi or 'other'.  They
>   * hang in private_data off the gendisk structure. We may end up
>   * putting all kinds of interesting stuff here :-)
> @@ -128,18 +145,10 @@ struct blkfront_info
>       int vdevice;
>       blkif_vdev_t handle;
>       enum blkif_state connected;
> -     int ring_ref[XENBUS_MAX_RING_PAGES];
>       unsigned int nr_ring_pages;
> -     struct blkif_front_ring ring;
> -     unsigned int evtchn, irq;
>       struct request_queue *rq;
> -     struct work_struct work;
> -     struct gnttab_free_callback callback;
> -     struct blk_shadow shadow[BLK_MAX_RING_SIZE];
>       struct list_head grants;
> -     struct list_head indirect_pages;
>       unsigned int persistent_gnts_c;
> -     unsigned long shadow_free;
>       unsigned int feature_flush;
>       unsigned int feature_discard:1;
>       unsigned int feature_secdiscard:1;
> @@ -149,6 +158,7 @@ struct blkfront_info
>       unsigned int max_indirect_segments;
>       int is_ready;
>       struct blk_mq_tag_set tag_set;
> +     struct blkfront_ring_info rinfo;
>  };
>  
>  static unsigned int nr_minors;
> @@ -179,33 +189,35 @@ static DEFINE_SPINLOCK(minor_lock);
>  #define INDIRECT_GREFS(_segs) \
>       ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
>  
> -static int blkfront_setup_indirect(struct blkfront_info *info);
> +static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
>  static int blkfront_gather_backend_features(struct blkfront_info *info);
>  
> -static int get_id_from_freelist(struct blkfront_info *info)
> +static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
>  {
> -     unsigned long free = info->shadow_free;
> -     BUG_ON(free >= BLK_RING_SIZE(info));
> -     info->shadow_free = info->shadow[free].req.u.rw.id;
> -     info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
> +     unsigned long free = rinfo->shadow_free;
> +
> +     BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
> +     rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
> +     rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
>       return free;
>  }
>  
> -static int add_id_to_freelist(struct blkfront_info *info,
> +static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
>                              unsigned long id)
>  {
> -     if (info->shadow[id].req.u.rw.id != id)
> +     if (rinfo->shadow[id].req.u.rw.id != id)
>               return -EINVAL;
> -     if (info->shadow[id].request == NULL)
> +     if (rinfo->shadow[id].request == NULL)
>               return -EINVAL;
> -     info->shadow[id].req.u.rw.id  = info->shadow_free;
> -     info->shadow[id].request = NULL;
> -     info->shadow_free = id;
> +     rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
> +     rinfo->shadow[id].request = NULL;
> +     rinfo->shadow_free = id;
>       return 0;
>  }
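
For readers following along: the shadow free list is only relocated here,
not changed.  Unused shadow entries are threaded through their own
req.u.rw.id fields, so both allocation and release are O(1).  Below is a
minimal stand-alone illustration of the same bookkeeping, nothing
blkfront-specific about it, assuming a toy ring of 8 slots:

#include <assert.h>
#include <stdio.h>

#define TOY_RING_SIZE 8

/* slot_next[] plays the role of shadow[i].req.u.rw.id */
static unsigned long slot_next[TOY_RING_SIZE];
static unsigned long free_head;

static void init_freelist(void)
{
        unsigned long i;

        for (i = 0; i < TOY_RING_SIZE; i++)
                slot_next[i] = i + 1;   /* last entry points past the end */
        free_head = 0;
}

static unsigned long get_id(void)
{
        unsigned long id = free_head;

        assert(id < TOY_RING_SIZE);     /* mirrors the BUG_ON() above */
        free_head = slot_next[id];
        return id;
}

static void put_id(unsigned long id)
{
        slot_next[id] = free_head;
        free_head = id;
}

int main(void)
{
        unsigned long a, b;

        init_freelist();
        a = get_id();
        b = get_id();
        printf("allocated ids %lu and %lu\n", a, b);
        put_id(a);
        put_id(b);
        return 0;
}
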
>  
> -static int fill_grant_buffer(struct blkfront_info *info, int num)
> +static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
>  {
> +     struct blkfront_info *info = rinfo->dev_info;
>       struct page *granted_page;
>       struct grant *gnt_list_entry, *n;
>       int i = 0;
> @@ -341,8 +353,8 @@ static void xlbd_release_minors(unsigned int minor, unsigned int nr)
>  
>  static void blkif_restart_queue_callback(void *arg)
>  {
> -     struct blkfront_info *info = (struct blkfront_info *)arg;
> -     schedule_work(&info->work);
> +     struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
> +     schedule_work(&rinfo->work);
>  }
>  
>  static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
> @@ -400,7 +412,7 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
>   *
>   * @req: a request struct
>   */
> -static int blkif_queue_request(struct request *req)
> +static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
>  {
>       struct blkfront_info *info = req->rq_disk->private_data;
>       struct blkif_request *ring_req;
> @@ -438,9 +450,9 @@ static int blkif_queue_request(struct request *req)
>                   max_grefs - info->persistent_gnts_c,
>                   &gref_head) < 0) {
>                       gnttab_request_free_callback(
> -                             &info->callback,
> +                             &rinfo->callback,
>                               blkif_restart_queue_callback,
> -                             info,
> +                             rinfo,
>                               max_grefs);
>                       return 1;
>               }
> @@ -448,9 +460,9 @@ static int blkif_queue_request(struct request *req)
>               new_persistent_gnts = 0;
>  
>       /* Fill out a communications ring structure. */
> -     ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
> -     id = get_id_from_freelist(info);
> -     info->shadow[id].request = req;
> +     ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
> +     id = get_id_from_freelist(rinfo);
> +     rinfo->shadow[id].request = req;
>  
>       if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
>               ring_req->operation = BLKIF_OP_DISCARD;
> @@ -466,7 +478,7 @@ static int blkif_queue_request(struct request *req)
>                      req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
>               BUG_ON(info->max_indirect_segments &&
>                      req->nr_phys_segments > info->max_indirect_segments);
> -             nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
> +             nseg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
>               ring_req->u.rw.id = id;
>               if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
>                       /*
> @@ -509,7 +521,7 @@ static int blkif_queue_request(struct request *req)
>                       }
>                       ring_req->u.rw.nr_segments = nseg;
>               }
> -             for_each_sg(info->shadow[id].sg, sg, nseg, i) {
> +             for_each_sg(rinfo->shadow[id].sg, sg, nseg, i) {
>                       fsect = sg->offset >> 9;
>                       lsect = fsect + (sg->length >> 9) - 1;
>  
> @@ -525,14 +537,14 @@ static int blkif_queue_request(struct request *req)
>                                       struct page *indirect_page;
>  
>                                       /* Fetch a pre-allocated page to use for indirect grefs */
> -                                     BUG_ON(list_empty(&info->indirect_pages));
> -                                     indirect_page = list_first_entry(&info->indirect_pages,
> +                                     BUG_ON(list_empty(&rinfo->indirect_pages));
> +                                     indirect_page = list_first_entry(&rinfo->indirect_pages,
>                                                                        struct page, lru);
>                                       list_del(&indirect_page->lru);
>                                       pfn = page_to_pfn(indirect_page);
>                               }
>                               gnt_list_entry = get_grant(&gref_head, pfn, info);
> -                             info->shadow[id].indirect_grants[n] = gnt_list_entry;
> +                             rinfo->shadow[id].indirect_grants[n] = gnt_list_entry;
>                               segments = kmap_atomic(pfn_to_page(gnt_list_entry->pfn));
>                               ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
>                       }
> @@ -540,7 +552,7 @@ static int blkif_queue_request(struct request *req)
>                       gnt_list_entry = get_grant(&gref_head, page_to_pfn(sg_page(sg)), info);
>                       ref = gnt_list_entry->gref;
>  
> -                     info->shadow[id].grants_used[i] = gnt_list_entry;
> +                     rinfo->shadow[id].grants_used[i] = gnt_list_entry;
>  
>                       if (rq_data_dir(req) && info->feature_persistent) {
>                               char *bvec_data;
> @@ -586,10 +598,10 @@ static int blkif_queue_request(struct request *req)
>                       kunmap_atomic(segments);
>       }
>  
> -     info->ring.req_prod_pvt++;
> +     rinfo->ring.req_prod_pvt++;
>  
>       /* Keep a private copy so we can reissue requests when recovering. */
> -     info->shadow[id].req = *ring_req;
> +     rinfo->shadow[id].req = *ring_req;
>  
>       if (new_persistent_gnts)
>               gnttab_free_grant_references(gref_head);
> @@ -598,14 +610,14 @@ static int blkif_queue_request(struct request *req)
>  }
>  
>  
> -static inline void flush_requests(struct blkfront_info *info)
> +static inline void flush_requests(struct blkfront_ring_info *rinfo)
>  {
>       int notify;
>  
> -     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
> +     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
>  
>       if (notify)
> -             notify_remote_via_irq(info->irq);
> +             notify_remote_via_irq(rinfo->irq);
>  }
>  
>  static inline bool blkif_request_flush_invalid(struct request *req,
> @@ -621,20 +633,20 @@ static inline bool blkif_request_flush_invalid(struct request *req,
>  static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
>                          const struct blk_mq_queue_data *qd)
>  {
> -     struct blkfront_info *info = qd->rq->rq_disk->private_data;
> +     struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
>  
>       blk_mq_start_request(qd->rq);
>       spin_lock_irq(&info->io_lock);
> -     if (RING_FULL(&info->ring))
> +     if (RING_FULL(&rinfo->ring))
>               goto out_busy;
>  
> -     if (blkif_request_flush_invalid(qd->rq, info))
> +     if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
>               goto out_err;
>  
> -     if (blkif_queue_request(qd->rq))
> +     if (blkif_queue_request(qd->rq, rinfo))
>               goto out_busy;
>  
> -     flush_requests(info);
> +     flush_requests(rinfo);
>       spin_unlock_irq(&info->io_lock);
>       return BLK_MQ_RQ_QUEUE_OK;
>  
> @@ -648,9 +660,19 @@ out_busy:
>       return BLK_MQ_RQ_QUEUE_BUSY;
>  }
>  
> +static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
> +                         unsigned int index)
> +{
> +     struct blkfront_info *info = (struct blkfront_info *)data;
> +
> +     hctx->driver_data = &info->rinfo;
> +     return 0;
> +}
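
Using hctx->driver_data as the hook is the right call; with a single ring
every hardware context simply points at info->rinfo.  Once several rings
exist I would expect the queue index to pick the ring, along the lines of
the sketch below (info->nr_rings and the rinfo array are assumptions
carried over from the sketch earlier in this mail, not part of this patch):

static int blk_mq_init_hctx_sketch(struct blk_mq_hw_ctx *hctx, void *data,
                                   unsigned int index)
{
        struct blkfront_info *info = data;

        BUG_ON(index >= info->nr_rings);        /* hypothetical field */
        hctx->driver_data = &info->rinfo[index];
        return 0;
}
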
> +
>  static struct blk_mq_ops blkfront_mq_ops = {
>       .queue_rq = blkif_queue_rq,
>       .map_queue = blk_mq_map_queue,
> +     .init_hctx = blk_mq_init_hctx,
>  };
>  
>  static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
> @@ -903,6 +925,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
>  static void xlvbd_release_gendisk(struct blkfront_info *info)
>  {
>       unsigned int minor, nr_minors;
> +     struct blkfront_ring_info *rinfo = &info->rinfo;
>  
>       if (info->rq == NULL)
>               return;
> @@ -911,10 +934,10 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
>       blk_mq_stop_hw_queues(info->rq);
>  
>       /* No more gnttab callback work. */
> -     gnttab_cancel_free_callback(&info->callback);
> +     gnttab_cancel_free_callback(&rinfo->callback);
>  
>       /* Flush gnttab callback work. Must be done with no locks held. */
> -     flush_work(&info->work);
> +     flush_work(&rinfo->work);
>  
>       del_gendisk(info->gd);
>  
> @@ -931,20 +954,20 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
>  }
>  
>  /* Must be called with io_lock holded */
> -static void kick_pending_request_queues(struct blkfront_info *info)
> +static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
>  {
> -     if (!RING_FULL(&info->ring))
> -             blk_mq_start_stopped_hw_queues(info->rq, true);
> +     if (!RING_FULL(&rinfo->ring))
> +             blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
>  }
>  
>  static void blkif_restart_queue(struct work_struct *work)
>  {
> -     struct blkfront_info *info = container_of(work, struct blkfront_info, work);
> +     struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
>  
> -     spin_lock_irq(&info->io_lock);
> -     if (info->connected == BLKIF_STATE_CONNECTED)
> -             kick_pending_request_queues(info);
> -     spin_unlock_irq(&info->io_lock);
> +     spin_lock_irq(&rinfo->dev_info->io_lock);
> +     if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
> +             kick_pending_request_queues(rinfo);
> +     spin_unlock_irq(&rinfo->dev_info->io_lock);
>  }
>  
>  static void blkif_free(struct blkfront_info *info, int suspend)
> @@ -952,6 +975,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>       struct grant *persistent_gnt;
>       struct grant *n;
>       int i, j, segs;
> +     struct blkfront_ring_info *rinfo = &info->rinfo;
>  
>       /* Prevent new requests being issued until we fix things up. */
>       spin_lock_irq(&info->io_lock);
> @@ -964,7 +988,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>       /* Remove all persistent grants */
>       if (!list_empty(&info->grants)) {
>               list_for_each_entry_safe(persistent_gnt, n,
> -                                      &info->grants, node) {
> +                                      &info->grants, node) {
>                       list_del(&persistent_gnt->node);
>                       if (persistent_gnt->gref != GRANT_INVALID_REF) {
>                               gnttab_end_foreign_access(persistent_gnt->gref,
> @@ -982,11 +1006,11 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>        * Remove indirect pages, this only happens when using indirect
>        * descriptors but not persistent grants
>        */
> -     if (!list_empty(&info->indirect_pages)) {
> +     if (!list_empty(&rinfo->indirect_pages)) {
>               struct page *indirect_page, *n;
>  
>               BUG_ON(info->feature_persistent);
> -             list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
> +             list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
>                       list_del(&indirect_page->lru);
>                       __free_page(indirect_page);
>               }
> @@ -997,21 +1021,21 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>                * Clear persistent grants present in requests already
>                * on the shared ring
>                */
> -             if (!info->shadow[i].request)
> +             if (!rinfo->shadow[i].request)
>                       goto free_shadow;
>  
> -             segs = info->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
> -                    info->shadow[i].req.u.indirect.nr_segments :
> -                    info->shadow[i].req.u.rw.nr_segments;
> +             segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
> +                    rinfo->shadow[i].req.u.indirect.nr_segments :
> +                    rinfo->shadow[i].req.u.rw.nr_segments;
>               for (j = 0; j < segs; j++) {
> -                     persistent_gnt = info->shadow[i].grants_used[j];
> +                     persistent_gnt = rinfo->shadow[i].grants_used[j];
>                       gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
>                       if (info->feature_persistent)
>                               __free_page(pfn_to_page(persistent_gnt->pfn));
>                       kfree(persistent_gnt);
>               }
>  
> -             if (info->shadow[i].req.operation != BLKIF_OP_INDIRECT)
> +             if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
>                       /*
>                        * If this is not an indirect operation don't try to
>                        * free indirect segments
> @@ -1019,45 +1043,45 @@ static void blkif_free(struct blkfront_info *info, int suspend)
>                       goto free_shadow;
>  
>               for (j = 0; j < INDIRECT_GREFS(segs); j++) {
> -                     persistent_gnt = info->shadow[i].indirect_grants[j];
> +                     persistent_gnt = rinfo->shadow[i].indirect_grants[j];
>                       gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
>                       __free_page(pfn_to_page(persistent_gnt->pfn));
>                       kfree(persistent_gnt);
>               }
>  
>  free_shadow:
> -             kfree(info->shadow[i].grants_used);
> -             info->shadow[i].grants_used = NULL;
> -             kfree(info->shadow[i].indirect_grants);
> -             info->shadow[i].indirect_grants = NULL;
> -             kfree(info->shadow[i].sg);
> -             info->shadow[i].sg = NULL;
> +             kfree(rinfo->shadow[i].grants_used);
> +             rinfo->shadow[i].grants_used = NULL;
> +             kfree(rinfo->shadow[i].indirect_grants);
> +             rinfo->shadow[i].indirect_grants = NULL;
> +             kfree(rinfo->shadow[i].sg);
> +             rinfo->shadow[i].sg = NULL;
>       }
>  
>       /* No more gnttab callback work. */
> -     gnttab_cancel_free_callback(&info->callback);
> +     gnttab_cancel_free_callback(&rinfo->callback);
>       spin_unlock_irq(&info->io_lock);
>  
>       /* Flush gnttab callback work. Must be done with no locks held. */
> -     flush_work(&info->work);
> +     flush_work(&rinfo->work);
>  
>       /* Free resources associated with old device channel. */
>       for (i = 0; i < info->nr_ring_pages; i++) {
> -             if (info->ring_ref[i] != GRANT_INVALID_REF) {
> -                     gnttab_end_foreign_access(info->ring_ref[i], 0, 0);
> -                     info->ring_ref[i] = GRANT_INVALID_REF;
> +             if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
> +                     gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
> +                     rinfo->ring_ref[i] = GRANT_INVALID_REF;
>               }
>       }
> -     free_pages((unsigned long)info->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
> -     info->ring.sring = NULL;
> +     free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
> +     rinfo->ring.sring = NULL;
>  
> -     if (info->irq)
> -             unbind_from_irqhandler(info->irq, info);
> -     info->evtchn = info->irq = 0;
> +     if (rinfo->irq)
> +             unbind_from_irqhandler(rinfo->irq, rinfo);
> +     rinfo->evtchn = rinfo->irq = 0;
>  
>  }
>  
> -static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
> +static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *rinfo,
>                            struct blkif_response *bret)
>  {
>       int i = 0;
> @@ -1065,6 +1089,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
>       char *bvec_data;
>       void *shared_data;
>       int nseg;
> +     struct blkfront_info *info = rinfo->dev_info;
>  
>       nseg = s->req.operation == BLKIF_OP_INDIRECT ?
>               s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
> @@ -1126,7 +1151,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
>                                */
>                               if (!info->feature_persistent) {
>                                       indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
> -                                     list_add(&indirect_page->lru, &info->indirect_pages);
> +                                     list_add(&indirect_page->lru, &rinfo->indirect_pages);
>                               }
>                               s->indirect_grants[i]->gref = GRANT_INVALID_REF;
>                               list_add_tail(&s->indirect_grants[i]->node, &info->grants);
> @@ -1141,7 +1166,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>       struct blkif_response *bret;
>       RING_IDX i, rp;
>       unsigned long flags;
> -     struct blkfront_info *info = (struct blkfront_info *)dev_id;
> +     struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
> +     struct blkfront_info *info = rinfo->dev_info;
>       int error;
>  
>       spin_lock_irqsave(&info->io_lock, flags);
> @@ -1152,13 +1178,13 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>       }
>  
>   again:
> -     rp = info->ring.sring->rsp_prod;
> +     rp = rinfo->ring.sring->rsp_prod;
>       rmb(); /* Ensure we see queued responses up to 'rp'. */
>  
> -     for (i = info->ring.rsp_cons; i != rp; i++) {
> +     for (i = rinfo->ring.rsp_cons; i != rp; i++) {
>               unsigned long id;
>  
> -             bret = RING_GET_RESPONSE(&info->ring, i);
> +             bret = RING_GET_RESPONSE(&rinfo->ring, i);
>               id   = bret->id;
>               /*
>                * The backend has messed up and given us an id that we would
> @@ -1172,12 +1198,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>                        * the id is busted. */
>                       continue;
>               }
> -             req  = info->shadow[id].request;
> +             req  = rinfo->shadow[id].request;
>  
>               if (bret->operation != BLKIF_OP_DISCARD)
> -                     blkif_completion(&info->shadow[id], info, bret);
> +                     blkif_completion(&rinfo->shadow[id], rinfo, bret);
>  
> -             if (add_id_to_freelist(info, id)) {
> +             if (add_id_to_freelist(rinfo, id)) {
>                       WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
>                            info->gd->disk_name, op_name(bret->operation), id);
>                       continue;
> @@ -1206,7 +1232,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>                               error = -EOPNOTSUPP;
>                       }
>                       if (unlikely(bret->status == BLKIF_RSP_ERROR &&
> -                                  info->shadow[id].req.u.rw.nr_segments == 0)) {
> +                                  rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
>                               printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
>                                      info->gd->disk_name, op_name(bret->operation));
>                               error = -EOPNOTSUPP;
> @@ -1231,17 +1257,17 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>               }
>       }
>  
> -     info->ring.rsp_cons = i;
> +     rinfo->ring.rsp_cons = i;
>  
> -     if (i != info->ring.req_prod_pvt) {
> +     if (i != rinfo->ring.req_prod_pvt) {
>               int more_to_do;
> -             RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
> +             RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
>               if (more_to_do)
>                       goto again;
>       } else
> -             info->ring.sring->rsp_event = i + 1;
> +             rinfo->ring.sring->rsp_event = i + 1;
>  
> -     kick_pending_request_queues(info);
> +     kick_pending_request_queues(rinfo);
>  
>       spin_unlock_irqrestore(&info->io_lock, flags);
>  
> @@ -1250,15 +1276,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
>  
>  
>  static int setup_blkring(struct xenbus_device *dev,
> -                      struct blkfront_info *info)
> +                      struct blkfront_ring_info *rinfo)
>  {
>       struct blkif_sring *sring;
>       int err, i;
> +     struct blkfront_info *info = rinfo->dev_info;
>       unsigned long ring_size = info->nr_ring_pages * PAGE_SIZE;
>       grant_ref_t gref[XENBUS_MAX_RING_PAGES];
>  
>       for (i = 0; i < info->nr_ring_pages; i++)
> -             info->ring_ref[i] = GRANT_INVALID_REF;
> +             rinfo->ring_ref[i] = GRANT_INVALID_REF;
>  
>       sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
>                                                      get_order(ring_size));
> @@ -1267,29 +1294,29 @@ static int setup_blkring(struct xenbus_device *dev,
>               return -ENOMEM;
>       }
>       SHARED_RING_INIT(sring);
> -     FRONT_RING_INIT(&info->ring, sring, ring_size);
> +     FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
>  
> -     err = xenbus_grant_ring(dev, info->ring.sring, info->nr_ring_pages, gref);
> +     err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
>       if (err < 0) {
>               free_pages((unsigned long)sring, get_order(ring_size));
> -             info->ring.sring = NULL;
> +             rinfo->ring.sring = NULL;
>               goto fail;
>       }
>       for (i = 0; i < info->nr_ring_pages; i++)
> -             info->ring_ref[i] = gref[i];
> +             rinfo->ring_ref[i] = gref[i];
>  
> -     err = xenbus_alloc_evtchn(dev, &info->evtchn);
> +     err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
>       if (err)
>               goto fail;
>  
> -     err = bind_evtchn_to_irqhandler(info->evtchn, blkif_interrupt, 0,
> -                                     "blkif", info);
> +     err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
> +                                     "blkif", rinfo);
>       if (err <= 0) {
>               xenbus_dev_fatal(dev, err,
>                                "bind_evtchn_to_irqhandler failed");
>               goto fail;
>       }
> -     info->irq = err;
> +     rinfo->irq = err;
>  
>       return 0;
>  fail:
> @@ -1307,6 +1334,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
>       int err, i;
>       unsigned int max_page_order = 0;
>       unsigned int ring_page_order = 0;
> +     struct blkfront_ring_info *rinfo = &info->rinfo;
>  
>       err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
>                          "max-ring-page-order", "%u", &max_page_order);
> @@ -1318,7 +1346,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
>       }
>  
>       /* Create shared ring, alloc event channel. */
> -     err = setup_blkring(dev, info);
> +     err = setup_blkring(dev, rinfo);
>       if (err)
>               goto out;
>  
> @@ -1331,7 +1359,7 @@ again:
>  
>       if (info->nr_ring_pages == 1) {
>               err = xenbus_printf(xbt, dev->nodename,
> -                                 "ring-ref", "%u", info->ring_ref[0]);
> +                                 "ring-ref", "%u", rinfo->ring_ref[0]);
>               if (err) {
>                       message = "writing ring-ref";
>                       goto abort_transaction;
> @@ -1349,7 +1377,7 @@ again:
>  
>                       snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
>                       err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
> -                                         "%u", info->ring_ref[i]);
> +                                         "%u", rinfo->ring_ref[i]);
>                       if (err) {
>                               message = "writing ring-ref";
>                               goto abort_transaction;
> @@ -1357,7 +1385,7 @@ again:
>               }
>       }
>       err = xenbus_printf(xbt, dev->nodename,
> -                         "event-channel", "%u", info->evtchn);
> +                         "event-channel", "%u", rinfo->evtchn);
>       if (err) {
>               message = "writing event-channel";
>               goto abort_transaction;
> @@ -1383,8 +1411,8 @@ again:
>       }
>  
>       for (i = 0; i < BLK_RING_SIZE(info); i++)
> -             info->shadow[i].req.u.rw.id = i+1;
> -     info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
> +             rinfo->shadow[i].req.u.rw.id = i+1;
> +     rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
>       xenbus_switch_state(dev, XenbusStateInitialised);
>  
>       return 0;
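
One more thing to keep in mind for the follow-up multi-queue patches: with
a single ring the "ring-ref%u"/"event-channel" keys written above can stay
exactly as they are, but once there are several rings each one will need
its own namespace in xenstore.  Purely as a sketch of what that could look
like (the "queue-%u" prefix and the helper are illustrative assumptions,
not something this patch introduces):

/* Sketch: write one ring's keys under a hypothetical per-queue prefix. */
static int write_queue_keys_sketch(struct xenbus_transaction xbt,
                                   struct xenbus_device *dev,
                                   struct blkfront_ring_info *rinfo,
                                   unsigned int queue,
                                   unsigned int nr_ring_pages)
{
        char key[2 * RINGREF_NAME_LEN];
        unsigned int i;
        int err;

        for (i = 0; i < nr_ring_pages; i++) {
                snprintf(key, sizeof(key), "queue-%u/ring-ref%u", queue, i);
                err = xenbus_printf(xbt, dev->nodename, key, "%u",
                                    rinfo->ring_ref[i]);
                if (err)
                        return err;
        }
        snprintf(key, sizeof(key), "queue-%u/event-channel", queue);
        return xenbus_printf(xbt, dev->nodename, key, "%u", rinfo->evtchn);
}
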
> @@ -1410,6 +1438,7 @@ static int blkfront_probe(struct xenbus_device *dev,
>  {
>       int err, vdevice;
>       struct blkfront_info *info;
> +     struct blkfront_ring_info *rinfo;
>  
>       /* FIXME: Use dynamic device id if this is not set. */
>       err = xenbus_scanf(XBT_NIL, dev->nodename,
> @@ -1459,15 +1488,18 @@ static int blkfront_probe(struct xenbus_device *dev,
>               return -ENOMEM;
>       }
>  
> +     rinfo = &info->rinfo;
> +     INIT_LIST_HEAD(&rinfo->indirect_pages);
> +     rinfo->dev_info = info;
> +     INIT_WORK(&rinfo->work, blkif_restart_queue);
> +
>       mutex_init(&info->mutex);
>       spin_lock_init(&info->io_lock);
>       info->xbdev = dev;
>       info->vdevice = vdevice;
>       INIT_LIST_HEAD(&info->grants);
> -     INIT_LIST_HEAD(&info->indirect_pages);
>       info->persistent_gnts_c = 0;
>       info->connected = BLKIF_STATE_DISCONNECTED;
> -     INIT_WORK(&info->work, blkif_restart_queue);
>  
>       /* Front end dir is a number, which is used as the id. */
>       info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
> @@ -1501,19 +1533,20 @@ static int blkif_recover(struct blkfront_info *info)
>       int pending, size;
>       struct split_bio *split_bio;
>       struct list_head requests;
> +     struct blkfront_ring_info *rinfo = &info->rinfo;
>  
>       /* Stage 1: Make a safe copy of the shadow state. */
> -     copy = kmemdup(info->shadow, sizeof(info->shadow),
> +     copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
>                      GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
>       if (!copy)
>               return -ENOMEM;
>  
>       /* Stage 2: Set up free list. */
> -     memset(&info->shadow, 0, sizeof(info->shadow));
> +     memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
>       for (i = 0; i < BLK_RING_SIZE(info); i++)
> -             info->shadow[i].req.u.rw.id = i+1;
> -     info->shadow_free = info->ring.req_prod_pvt;
> -     info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
> +             rinfo->shadow[i].req.u.rw.id = i+1;
> +     rinfo->shadow_free = rinfo->ring.req_prod_pvt;
> +     rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
>  
>       rc = blkfront_gather_backend_features(info);
>       if (rc) {
> @@ -1559,7 +1592,7 @@ static int blkif_recover(struct blkfront_info *info)
>       info->connected = BLKIF_STATE_CONNECTED;
>  
>       /* Kick any other new requests queued since we resumed */
> -     kick_pending_request_queues(info);
> +     kick_pending_request_queues(rinfo);
>  
>       list_for_each_entry_safe(req, n, &requests, queuelist) {
>               /* Requeue pending requests (flush or discard) */
> @@ -1693,17 +1726,18 @@ static void blkfront_setup_discard(struct blkfront_info *info)
>               info->feature_secdiscard = !!discard_secure;
>  }
>  
> -static int blkfront_setup_indirect(struct blkfront_info *info)
> +static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
>  {
>       unsigned int segs;
>       int err, i;
> +     struct blkfront_info *info = rinfo->dev_info;
>  
>       if (info->max_indirect_segments == 0)
>               segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
>       else
>               segs = info->max_indirect_segments;
>  
> -     err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
> +     err = fill_grant_buffer(rinfo, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
>       if (err)
>               goto out_of_memory;
>  
> @@ -1715,31 +1749,31 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
>                */
>               int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE(info);
>  
> -             BUG_ON(!list_empty(&info->indirect_pages));
> +             BUG_ON(!list_empty(&rinfo->indirect_pages));
>               for (i = 0; i < num; i++) {
>                       struct page *indirect_page = alloc_page(GFP_NOIO);
>                       if (!indirect_page)
>                               goto out_of_memory;
> -                     list_add(&indirect_page->lru, &info->indirect_pages);
> +                     list_add(&indirect_page->lru, &rinfo->indirect_pages);
>               }
>       }
>  
>       for (i = 0; i < BLK_RING_SIZE(info); i++) {
> -             info->shadow[i].grants_used = kzalloc(
> -                     sizeof(info->shadow[i].grants_used[0]) * segs,
> +             rinfo->shadow[i].grants_used = kzalloc(
> +                     sizeof(rinfo->shadow[i].grants_used[0]) * segs,
>                       GFP_NOIO);
> -             info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
> +             rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * segs, GFP_NOIO);
>               if (info->max_indirect_segments)
> -                     info->shadow[i].indirect_grants = kzalloc(
> -                             sizeof(info->shadow[i].indirect_grants[0]) *
> +                     rinfo->shadow[i].indirect_grants = kzalloc(
> +                             sizeof(rinfo->shadow[i].indirect_grants[0]) *
>                               INDIRECT_GREFS(segs),
>                               GFP_NOIO);
> -             if ((info->shadow[i].grants_used == NULL) ||
> -                     (info->shadow[i].sg == NULL) ||
> +             if ((rinfo->shadow[i].grants_used == NULL) ||
> +                     (rinfo->shadow[i].sg == NULL) ||
>                    (info->max_indirect_segments &&
> -                  (info->shadow[i].indirect_grants == NULL)))
> +                  (rinfo->shadow[i].indirect_grants == NULL)))
>                       goto out_of_memory;
> -             sg_init_table(info->shadow[i].sg, segs);
> +             sg_init_table(rinfo->shadow[i].sg, segs);
>       }
>  
>  
> @@ -1747,16 +1781,16 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
>  
>  out_of_memory:
>       for (i = 0; i < BLK_RING_SIZE(info); i++) {
> -             kfree(info->shadow[i].grants_used);
> -             info->shadow[i].grants_used = NULL;
> -             kfree(info->shadow[i].sg);
> -             info->shadow[i].sg = NULL;
> -             kfree(info->shadow[i].indirect_grants);
> -             info->shadow[i].indirect_grants = NULL;
> +             kfree(rinfo->shadow[i].grants_used);
> +             rinfo->shadow[i].grants_used = NULL;
> +             kfree(rinfo->shadow[i].sg);
> +             rinfo->shadow[i].sg = NULL;
> +             kfree(rinfo->shadow[i].indirect_grants);
> +             rinfo->shadow[i].indirect_grants = NULL;
>       }
> -     if (!list_empty(&info->indirect_pages)) {
> +     if (!list_empty(&rinfo->indirect_pages)) {
>               struct page *indirect_page, *n;
> -             list_for_each_entry_safe(indirect_page, n, &info->indirect_pages, lru) {
> +             list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
>                       list_del(&indirect_page->lru);
>                       __free_page(indirect_page);
>               }
> @@ -1823,7 +1857,7 @@ static int blkfront_gather_backend_features(struct blkfront_info *info)
>               info->max_indirect_segments = min(indirect_segments,
>                                                 xen_blkif_max_segments);
>  
> -     return blkfront_setup_indirect(info);
> +     return blkfront_setup_indirect(&info->rinfo);
>  }
>  
>  /*
> @@ -1837,6 +1871,7 @@ static void blkfront_connect(struct blkfront_info *info)
>       unsigned int physical_sector_size;
>       unsigned int binfo;
>       int err;
> +     struct blkfront_ring_info *rinfo = &info->rinfo;
>  
>       switch (info->connected) {
>       case BLKIF_STATE_CONNECTED:
> @@ -1913,7 +1948,7 @@ static void blkfront_connect(struct blkfront_info *info)
>       /* Kick pending requests. */
>       spin_lock_irq(&info->io_lock);
>       info->connected = BLKIF_STATE_CONNECTED;
> -     kick_pending_request_queues(info);
> +     kick_pending_request_queues(rinfo);
>       spin_unlock_irq(&info->io_lock);
>  
>       add_disk(info->gd);
> -- 
> 1.8.3.1
> 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel


 

