
[Xen-changelog] Use a block interface for each device.



# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 827a3c3524b334406b863acbf03405cec451d1f4
# Parent  6078dc5f7ea1809367b720f120b73a5b38bc8f9d
Use a block interface for each device.
Lots of code cleanups, incl. a couple of bug fixes.
Signed-off-by: Christian Limpach <Christian.Limpach@xxxxxxxxxxxx>
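
For readers skimming the patch: the main functional change is that blkif_queue_request() now allocates its grant references per request, rather than drawing on a single driver-wide pool reserved at module init, and when the grant table is temporarily exhausted it registers a free callback that restarts the per-device queue once references become available. The fragment below is a condensed paraphrase of that new flow (segment setup, shadow bookkeeping and the ring write are elided); it only uses identifiers that appear in the patch and is illustrative, not a substitute for the code further down.

    /* Restart this device's queue once grant references are free again. */
    static void blkif_restart_queue(void *arg)
    {
        struct blkfront_info *info = (struct blkfront_info *)arg;

        spin_lock_irq(&blkif_io_lock);
        info->callback.work = NULL;
        /* blk_start_queue() + do_blkif_request() if the ring has room. */
        kick_pending_request_queues(info);
        spin_unlock_irq(&blkif_io_lock);
    }

    static int blkif_queue_request(struct request *req)
    {
        struct blkfront_info *info = req->rq_disk->private_data;
        grant_ref_t gref_head, gref_terminal;

        if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
            return 1;

        /* Reserve enough references for one request's worth of segments. */
        if (gnttab_alloc_grant_references(BLKIF_MAX_SEGMENTS_PER_REQUEST,
                                          &gref_head, &gref_terminal) < 0) {
            /* Grant table exhausted: arrange a callback and have the
             * caller stop the queue and requeue the request. */
            if (info->callback.work)
                return 1;
            INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
            gnttab_request_free_callback(&info->callback, &info->work);
            return 1;
        }

        /* ... claim one reference per segment with
         * gnttab_claim_grant_reference()/gnttab_grant_foreign_access_ref()
         * and fill in the shared-ring request, as in the hunk below ... */

        /* Return any reserved-but-unclaimed references to the table. */
        gnttab_free_grant_references(&gref_head, gref_terminal);
        return 0;
    }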

diff -r 6078dc5f7ea1 -r 827a3c3524b3 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Mon Aug 22 20:57:26 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Mon Aug 22 20:59:00 2005
@@ -58,39 +58,21 @@
 #include <asm-xen/xen-public/grant_table.h>
 #include <asm-xen/gnttab.h>
 
-struct blkfront_info
-{
-       /* We watch the backend */
-       struct xenbus_watch watch;
-       int vdevice;
-       u16 handle;
-       int connected;
-       struct xenbus_device *dev;
-       char *backend;
-       int backend_id;
-       int grant_id;
-       blkif_front_ring_t ring;
-       unsigned int evtchn;
-};
-
 typedef unsigned char byte; /* from linux/ide.h */
 
 /* Control whether runtime update of vbds is enabled. */
 #define ENABLE_VBD_UPDATE 1
 
-#define BLKIF_STATE_CLOSED       0
-#define BLKIF_STATE_DISCONNECTED 1
-#define BLKIF_STATE_CONNECTED    2
-
-static unsigned int blkif_state = BLKIF_STATE_CLOSED;
-static unsigned int blkif_vbds_connected = 0;
+#define BLKIF_STATE_DISCONNECTED 0
+#define BLKIF_STATE_CONNECTED    1
+
+static unsigned int blkif_state = BLKIF_STATE_DISCONNECTED;
 
 #define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
 
 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \
     (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLKIF_RING_SIZE)
 #define GRANTREF_INVALID (1<<15)
-static grant_ref_t gref_head, gref_terminal;
 
 static struct blk_shadow {
     blkif_request_t req;
@@ -101,7 +83,7 @@
 
 static int recovery = 0; /* Recovery in progress: protected by blkif_io_lock */
 
-static void kick_pending_request_queues(void);
+static void kick_pending_request_queues(struct blkfront_info *info);
 
 static int __init xlblk_init(void);
 
@@ -128,7 +110,7 @@
 
 /* Kernel-specific definitions used in the common code */
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-#define DISABLE_SCATTERGATHER() 
+#define DISABLE_SCATTERGATHER()
 #else
 static int sg_operation = -1;
 #define DISABLE_SCATTERGATHER() (sg_operation = -1)
@@ -161,30 +143,34 @@
 
 module_init(xlblk_init);
 
-static struct xlbd_disk_info *head_waiting = NULL;
-static void kick_pending_request_queues(void)
-{
-    struct xlbd_disk_info *di;
-    while ( ((di = head_waiting) != NULL) && !RING_FULL(&di->info->ring) )
-    {
-        head_waiting = di->next_waiting;
-        di->next_waiting = NULL;
-        /* Re-enable calldowns. */
-        blk_start_queue(di->rq);
-        /* Kick things off immediately. */
-        do_blkif_request(di->rq);
-    }
+static void kick_pending_request_queues(struct blkfront_info *info)
+{
+       if (!RING_FULL(&info->ring)) {
+               /* Re-enable calldowns. */
+               blk_start_queue(info->rq);
+               /* Kick things off immediately. */
+               do_blkif_request(info->rq);
+       }
+}
+
+static void blkif_restart_queue(void *arg)
+{
+       struct blkfront_info *info = (struct blkfront_info *)arg;
+       spin_lock_irq(&blkif_io_lock);
+       info->callback.work = NULL;
+       kick_pending_request_queues(info);
+       spin_unlock_irq(&blkif_io_lock);
 }
 
 int blkif_open(struct inode *inode, struct file *filep)
 {
-    struct gendisk *gd = inode->i_bdev->bd_disk;
-    struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
-
-    /* Update of usage count is protected by per-device semaphore. */
-    di->mi->usage++;
-    
-    return 0;
+       // struct gendisk *gd = inode->i_bdev->bd_disk;
+       // struct xlbd_disk_info *di = (struct xlbd_disk_info *)gd->private_data;
+
+       /* Update of usage count is protected by per-device semaphore. */
+       // di->mi->usage++;
+
+       return 0;
 }
 
 
@@ -201,8 +187,8 @@
     int i;
 
     DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-                  command, (long)argument, inode->i_rdev); 
-  
+                  command, (long)argument, inode->i_rdev);
+
     switch ( command )
     {
     case HDIO_GETGEO:
@@ -228,7 +214,7 @@
 /*
  * blkif_queue_request
  *
- * request block io 
+ * request block io
  * 
  * id: for guest use only.
  * operation: BLKIF_OP_{READ,WRITE,PROBE}
@@ -237,7 +223,7 @@
  */
 static int blkif_queue_request(struct request *req)
 {
-    struct xlbd_disk_info *di = req->rq_disk->private_data;
+    struct blkfront_info *info = req->rq_disk->private_data;
     unsigned long buffer_ma;
     blkif_request_t *ring_req;
     struct bio *bio;
@@ -246,20 +232,29 @@
     unsigned long id;
     unsigned int fsect, lsect;
     int ref;
-
-    if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+    grant_ref_t gref_head, gref_terminal;
+
+    if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
         return 1;
 
+    if (gnttab_alloc_grant_references(BLKIF_MAX_SEGMENTS_PER_REQUEST,
+                                     &gref_head, &gref_terminal) < 0) {
+           if (info->callback.work)
+                   return 1;
+           INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
+           gnttab_request_free_callback(&info->callback, &info->work);
+           return 1;
+    }
+
     /* Fill out a communications ring structure. */
-    ring_req = RING_GET_REQUEST(&di->info->ring, di->info->ring.req_prod_pvt);
+    ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
     id = GET_ID_FROM_FREELIST();
     blk_shadow[id].request = (unsigned long)req;
 
     ring_req->id = id;
-    ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE :
-        BLKIF_OP_READ;
+    ring_req->operation = rq_data_dir(req) ? BLKIF_OP_WRITE : BLKIF_OP_READ;
     ring_req->sector_number = (blkif_sector_t)req->sector;
-    ring_req->handle = di->handle;
+    ring_req->handle = info->handle;
 
     ring_req->nr_segments = 0;
     rq_for_each_bio(bio, req)
@@ -277,26 +272,29 @@
 
             gnttab_grant_foreign_access_ref(
                         ref,
-                        di->info->backend_id,
+                        info->backend_id,
                         buffer_ma >> PAGE_SHIFT,
                         rq_data_dir(req) );
 
             blk_shadow[id].frame[ring_req->nr_segments] =
                 buffer_ma >> PAGE_SHIFT;
 
-            ring_req->frame_and_sects[ring_req->nr_segments++] =
+            ring_req->frame_and_sects[ring_req->nr_segments] =
                 blkif_fas_from_gref(ref, fsect, lsect);
+
+           ring_req->nr_segments++;
         }
     }
 
-    di->info->ring.req_prod_pvt++;
-    
+    info->ring.req_prod_pvt++;
+
     /* Keep a private copy so we can reissue requests when recovering. */
     pickle_request(&blk_shadow[id], ring_req);
 
+    gnttab_free_grant_references(&gref_head, gref_terminal);
+
     return 0;
 }
-
 
 /*
  * do_blkif_request
@@ -304,17 +302,17 @@
  */
 void do_blkif_request(request_queue_t *rq)
 {
-    struct xlbd_disk_info *di = NULL;
+    struct blkfront_info *info = NULL;
     struct request *req;
     int queued;
 
-    DPRINTK("Entered do_blkif_request\n"); 
+    DPRINTK("Entered do_blkif_request\n");
 
     queued = 0;
 
     while ( (req = elv_next_request(rq)) != NULL )
     {
-       di = req->rq_disk->private_data;
+       info = req->rq_disk->private_data;
 
         if ( !blk_fs_request(req) )
         {
@@ -322,8 +320,8 @@
             continue;
         }
 
-        if ( RING_FULL(&di->info->ring) )
-            goto wait;
+       if (RING_FULL(&info->ring))
+               goto wait;
 
         DPRINTK("do_blk_req %p: cmd %p, sec %lx, (%u/%li) buffer:%p [%s]\n",
                 req, req->cmd, req->sector, req->current_nr_sectors,
@@ -331,24 +329,19 @@
                 rq_data_dir(req) ? "write" : "read");
 
         blkdev_dequeue_request(req);
-        if ( blkif_queue_request(req) )
-        {
+        if (blkif_queue_request(req)) {
+               blk_requeue_request(rq, req);
         wait:
-            if ( di->next_waiting == NULL )
-            {
-                di->next_waiting = head_waiting;
-                head_waiting = di;
-                /* Avoid pointless unplugs. */
-                blk_stop_queue(rq);
-            }
-            break;
+               /* Avoid pointless unplugs. */
+               blk_stop_queue(rq);
+               break;
         }
 
         queued++;
     }
 
     if ( queued != 0 )
-        flush_requests(di->info);
+        flush_requests(info);
 }
 
 
@@ -359,16 +352,14 @@
     RING_IDX i, rp;
     unsigned long flags;
     struct blkfront_info *info = (struct blkfront_info *)dev_id;
-    
-    spin_lock_irqsave(&blkif_io_lock, flags);     
-
-    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED) || 
-         unlikely(recovery) )
-    {
+
+    spin_lock_irqsave(&blkif_io_lock, flags);
+
+    if (unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery)) {
         spin_unlock_irqrestore(&blkif_io_lock, flags);
         return IRQ_HANDLED;
     }
-    
+
     rp = info->ring.sring->rsp_prod;
     rmb(); /* Ensure we see queued responses up to 'rp'. */
 
@@ -393,7 +384,7 @@
                         bret->status);
 
             if ( unlikely(end_that_request_first
-                          (req, 
+                          (req,
                            (bret->status == BLKIF_RSP_OKAY),
                            req->hard_nr_sectors)) )
                 BUG();
@@ -407,7 +398,7 @@
 
     info->ring.rsp_cons = i;
 
-    kick_pending_request_queues();
+    kick_pending_request_queues(info);
 
     spin_unlock_irqrestore(&blkif_io_lock, flags);
 
@@ -436,7 +427,7 @@
 static void kick_pending_request_queues(void)
 {
     /* We kick pending request queues if the ring is reasonably empty. */
-    if ( (nr_pending != 0) && 
+    if ( (nr_pending != 0) &&
          (RING_PENDING_REQUESTS(&info->ring) < (BLK_RING_SIZE >> 1)) )
     {
         /* Attempt to drain the queue, but bail if the ring becomes full. */
@@ -447,20 +438,20 @@
 
 int blkif_open(struct inode *inode, struct file *filep)
 {
-    short xldev = inode->i_rdev; 
+    short xldev = inode->i_rdev;
     struct gendisk *gd = get_gendisk(xldev);
     xl_disk_t *disk = xldev_to_xldisk(inode->i_rdev);
-    short minor = MINOR(xldev); 
+    short minor = MINOR(xldev);
 
     if ( gd->part[minor].nr_sects == 0 )
-    { 
+    {
         /*
          * Device either doesn't exist, or has zero capacity; we use a few
          * cheesy heuristics to return the relevant error code
          */
         if ( (gd->sizes[minor >> gd->minor_shift] != 0) ||
              ((minor & (gd->max_p - 1)) != 0) )
-        { 
+        {
             /*
              * We have a real device, but no such partition, or we just have a
              * partition number so guess this is the problem.
@@ -469,16 +460,16 @@
         }
         else if ( gd->flags[minor >> gd->minor_shift] & GENHD_FL_REMOVABLE )
         {
-            /* This is a removable device => assume that media is missing. */ 
+            /* This is a removable device => assume that media is missing. */
             return -ENOMEDIUM; /* media not present (this is a guess) */
-        } 
+        }
         else
-        { 
+        {
             /* Just go for the general 'no such device' error. */
             return -ENODEV;    /* no such device */
         }
     }
-    
+
     /* Update of usage count is protected by per-device semaphore. */
     disk->usage++;
 
@@ -507,24 +498,24 @@
 {
     kdev_t dev = inode->i_rdev;
     struct hd_geometry *geo = (struct hd_geometry *)argument;
-    struct gendisk *gd;     
-    struct hd_struct *part; 
+    struct gendisk *gd;
+    struct hd_struct *part;
     int i;
     unsigned short cylinders;
     byte heads, sectors;
 
     /* NB. No need to check permissions. That is done for us. */
-    
+
     DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
-                  command, (long) argument, dev); 
-  
+                  command, (long) argument, dev);
+
     gd = get_gendisk(dev);
-    part = &gd->part[MINOR(dev)]; 
+    part = &gd->part[MINOR(dev)];
 
     switch ( command )
     {
     case BLKGETSIZE:
-        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects); 
+        DPRINTK_IOCTL("   BLKGETSIZE: %x %lx\n", BLKGETSIZE, part->nr_sects);
         return put_user(part->nr_sects, (unsigned long *) argument);
 
     case BLKGETSIZE64:
@@ -537,7 +528,7 @@
         return blkif_revalidate(dev);
 
     case BLKSSZGET:
-        return hardsect_size[MAJOR(dev)][MINOR(dev)]; 
+        return hardsect_size[MAJOR(dev)][MINOR(dev)];
 
     case BLKBSZGET:                                        /* get block size */
         DPRINTK_IOCTL("   BLKBSZGET: %x\n", BLKBSZGET);
@@ -563,7 +554,7 @@
            values consistent with the size of the device */
 
         heads = 0xff;
-        sectors = 0x3f; 
+        sectors = 0x3f;
         cylinders = part->nr_sects / (heads * sectors);
 
         if (put_user(0x00,  (unsigned long *) &geo->start)) return -EFAULT;
@@ -573,7 +564,7 @@
 
         return 0;
 
-    case HDIO_GETGEO_BIG: 
+    case HDIO_GETGEO_BIG:
         DPRINTK_IOCTL("   HDIO_GETGEO_BIG: %x\n", HDIO_GETGEO_BIG);
         if (!argument) return -EINVAL;
 
@@ -581,7 +572,7 @@
            values consistent with the size of the device */
 
         heads = 0xff;
-        sectors = 0x3f; 
+        sectors = 0x3f;
         cylinders = part->nr_sects / (heads * sectors);
 
         if (put_user(0x00,  (unsigned long *) &geo->start))  return -EFAULT;
@@ -605,7 +596,7 @@
         WPRINTK("ioctl %08x not supported by XL blkif\n", command);
         return -ENOSYS;
     }
-    
+
     return 0;
 }
 
@@ -625,7 +616,7 @@
     xl_disk_t *disk;
     unsigned long capacity;
     int i, rc = 0;
-    
+
     if ( (bd = bdget(dev)) == NULL )
         return -EINVAL;
 
@@ -673,7 +664,7 @@
 /*
  * blkif_queue_request
  *
- * request block io 
+ * request block io
  * 
  * id: for guest use only.
  * operation: BLKIF_OP_{READ,WRITE,PROBE}
@@ -707,7 +698,7 @@
 
     buffer_ma &= PAGE_MASK;
 
-    if ( unlikely(blkif_state != BLKIF_STATE_CONNECTED) )
+    if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
         return 1;
 
     switch ( operation )
@@ -715,7 +706,7 @@
 
     case BLKIF_OP_READ:
     case BLKIF_OP_WRITE:
-        gd = get_gendisk(device); 
+        gd = get_gendisk(device);
 
         /*
          * Update the sector_number we'll pass down as appropriate; note that
@@ -725,10 +716,10 @@
         sector_number += gd->part[MINOR(device)].start_sect;
 
         /*
-         * If this unit doesn't consist of virtual partitions then we clear 
+         * If this unit doesn't consist of virtual partitions then we clear
          * the partn bits from the device number.
          */
-        if ( !(gd->flags[MINOR(device)>>gd->minor_shift] & 
+        if ( !(gd->flags[MINOR(device)>>gd->minor_shift] &
                GENHD_FL_VIRT_PARTNS) )
             device &= ~(gd->max_p - 1);
 
@@ -736,10 +727,10 @@
              (sg_dev == device) &&
              (sg_next_sect == sector_number) )
         {
-            req = RING_GET_REQUEST(&info->ring, 
+            req = RING_GET_REQUEST(&info->ring,
                                    info->ring.req_prod_pvt - 1);
             bh = (struct buffer_head *)id;
-     
+
             bh->b_reqnext = (struct buffer_head *)blk_shadow[req->id].request;
             blk_shadow[req->id].request = (unsigned long)id;
 
@@ -793,7 +784,7 @@
     req->id            = xid;
     req->operation     = operation;
     req->sector_number = (blkif_sector_t)sector_number;
-    req->handle        = handle; 
+    req->handle        = handle;
     req->nr_segments   = 1;
     /* install a grant reference. */
     ref = gnttab_claim_grant_reference(&gref_head, gref_terminal);
@@ -809,11 +800,11 @@
 
     req->frame_and_sects[0] = blkif_fas_from_gref(ref, fsect, lsect);
 
-    /* Keep a private copy so we can reissue requests when recovering. */    
+    /* Keep a private copy so we can reissue requests when recovering. */
     pickle_request(&blk_shadow[xid], req);
 
     info->ring.req_prod_pvt++;
-    
+
     return 0;
 }
 
@@ -828,13 +819,13 @@
     struct buffer_head *bh, *next_bh;
     int rw, nsect, full, queued = 0;
 
-    DPRINTK("Entered do_blkif_request\n"); 
+    DPRINTK("Entered do_blkif_request\n");
 
     while ( !rq->plugged && !list_empty(&rq->queue_head))
     {
-        if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL ) 
+        if ( (req = blkdev_entry_next_request(&rq->queue_head)) == NULL )
             goto out;
-  
+
         DPRINTK("do_blkif_request %p: cmd %i, sec %lx, (%li/%li) bh:%p\n",
                 req, req->cmd, req->sector,
                 req->current_nr_sectors, req->nr_sectors, req->bh);
@@ -855,16 +846,16 @@
 
             full = blkif_queue_request(
                 (unsigned long)bh,
-                (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE, 
+                (rw == READ) ? BLKIF_OP_READ : BLKIF_OP_WRITE,
                 bh->b_data, bh->b_rsector, bh->b_size>>9, bh->b_rdev);
 
             if ( full )
-            { 
+            {
                 bh->b_reqnext = next_bh;
                 pending_queues[nr_pending++] = rq;
                 if ( unlikely(nr_pending >= MAX_PENDING) )
                     BUG();
-                goto out; 
+                goto out;
             }
 
             queued++;
@@ -872,7 +863,7 @@
             /* Dequeue the buffer head from the request. */
             nsect = bh->b_size >> 9;
             bh = req->bh = next_bh;
-            
+
             if ( bh != NULL )
             {
                 /* There's another buffer head to do. Update the request. */
@@ -902,13 +893,13 @@
 
 static void blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
 {
-    RING_IDX i, rp; 
-    unsigned long flags; 
+    RING_IDX i, rp;
+    unsigned long flags;
     struct buffer_head *bh, *next_bh;
-    
-    spin_lock_irqsave(&io_request_lock, flags);     
-
-    if ( unlikely(blkif_state == BLKIF_STATE_CLOSED || recovery) )
+
+    spin_lock_irqsave(&io_request_lock, flags);
+
+    if ( unlikely(info->connected != BLKIF_STATE_CONNECTED || recovery) )
     {
         spin_unlock_irqrestore(&io_request_lock, flags);
         return;
@@ -921,7 +912,7 @@
     {
         unsigned long id;
         blkif_response_t *bret;
-        
+
         bret = RING_GET_RESPONSE(&info->ring, i);
         id = bret->id;
         bh = (struct buffer_head *)blk_shadow[id].request;
@@ -955,7 +946,7 @@
 
     }
     info->ring.rsp_cons = i;
-    
+
     kick_pending_request_queues();
 
     spin_unlock_irqrestore(&io_request_lock, flags);
@@ -969,7 +960,7 @@
 {
     /* Prevent new requests being issued until we fix things up. */
     spin_lock_irq(&blkif_io_lock);
-    blkif_state = BLKIF_STATE_DISCONNECTED;
+    info->connected = BLKIF_STATE_DISCONNECTED;
     spin_unlock_irq(&blkif_io_lock);
 
     /* Free resources associated with old device channel. */
@@ -1045,7 +1036,7 @@
     flush_requests(info);
 
     /* Now safe to left other people use the interface. */
-    blkif_state = BLKIF_STATE_CONNECTED;
+    info->connected = BLKIF_STATE_CONNECTED;
 }
 
 static void blkif_connect(struct blkfront_info *info, u16 evtchn)
@@ -1080,31 +1071,29 @@
        node += strlen(watch->node);
 
        /* FIXME: clean up when error on the other end. */
-       if (info->connected)
+       if (info->connected == BLKIF_STATE_CONNECTED)
                return;
 
-       err = xenbus_gather(watch->node, 
+       err = xenbus_gather(watch->node,
                            "sectors", "%lu", &sectors,
                            "info", "%u", &binfo,
                            "sector-size", "%lu", &sector_size,
                            NULL);
        if (err) {
-               xenbus_dev_error(info->dev, err, "reading backend fields");
+               xenbus_dev_error(info->xbdev, err, "reading backend fields");
                return;
        }
 
-       xlvbd_add(sectors, info->vdevice, info->handle, binfo, sector_size, info);
-       info->connected = 1;
-
-       /* First to connect?  blkif is now connected. */
-       if (blkif_vbds_connected++ == 0)
-               blkif_state = BLKIF_STATE_CONNECTED;
-
-       xenbus_dev_ok(info->dev);
+       xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
+       info->connected = BLKIF_STATE_CONNECTED;
+
+       blkif_state = BLKIF_STATE_CONNECTED;
+
+       xenbus_dev_ok(info->xbdev);
 
        /* Kick pending requests. */
        spin_lock_irq(&blkif_io_lock);
-       kick_pending_request_queues();
+       kick_pending_request_queues(info);
        spin_unlock_irq(&blkif_io_lock);
 }
 
@@ -1122,17 +1111,20 @@
        SHARED_RING_INIT(sring);
        FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
-       info->grant_id = gnttab_claim_grant_reference(&gref_head,
-                                                     gref_terminal);
-       ASSERT(info->grant_id != -ENOSPC);
-       gnttab_grant_foreign_access_ref(info->grant_id,
-                                       info->backend_id,
-                                       virt_to_mfn(info->ring.sring),
-                                       0);
+       err = gnttab_grant_foreign_access(info->backend_id,
+                                         virt_to_mfn(info->ring.sring), 0);
+       if (err == -ENOSPC) {
+               free_page((unsigned long)info->ring.sring);
+               info->ring.sring = 0;
+               xenbus_dev_error(dev, err, "granting access to ring page");
+               return err;
+       }
+       info->grant_id = err;
 
        op.u.alloc_unbound.dom = info->backend_id;
        err = HYPERVISOR_event_channel_op(&op);
        if (err) {
+               gnttab_end_foreign_access(info->grant_id, 0);
                free_page((unsigned long)info->ring.sring);
                info->ring.sring = 0;
                xenbus_dev_error(dev, err, "allocating event channel");
@@ -1246,9 +1238,10 @@
                xenbus_dev_error(dev, err, "allocating info structure");
                return err;
        }
-       info->dev = dev;
+       info->xbdev = dev;
        info->vdevice = vdevice;
-       info->connected = 0;
+       info->connected = BLKIF_STATE_DISCONNECTED;
+       info->mi = NULL;
 
        /* Front end dir is a number, which is used as the id. */
        info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
@@ -1272,10 +1265,8 @@
        if (info->backend)
                unregister_xenbus_watch(&info->watch);
 
-       if (info->connected) {
-               xlvbd_del(info->handle);
-               blkif_vbds_connected--;
-       }
+       if (info->mi)
+               xlvbd_del(info);
 
        blkif_free(info);
 
@@ -1359,11 +1350,6 @@
          (xen_start_info.flags & SIF_BLK_BE_DOMAIN) )
         return 0;
 
-    /* A grant for every ring slot, plus one for the ring itself. */
-    if (gnttab_alloc_grant_references(MAXIMUM_OUTSTANDING_BLOCK_REQS + 1,
-                                     &gref_head, &gref_terminal) < 0)
-        return 1;
-
     IPRINTK("Initialising virtual block device driver\n");
 
     blk_shadow_free = 0;
@@ -1383,6 +1369,6 @@
 {
     int i;
     for ( i = 0; i < s->req.nr_segments; i++ )
-        gnttab_release_grant_reference(
-            &gref_head, blkif_gref_from_fas(s->req.frame_and_sects[i]));
-}
+        gnttab_free_grant_reference(
+               blkif_gref_from_fas(s->req.frame_and_sects[i]));
+}
diff -r 6078dc5f7ea1 -r 827a3c3524b3 linux-2.6-xen-sparse/drivers/xen/blkfront/block.h
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Mon Aug 22 20:57:26 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/block.h Mon Aug 22 20:59:00 2005
@@ -46,6 +46,7 @@
 #include <linux/major.h>
 #include <linux/devfs_fs_kernel.h>
 #include <asm-xen/hypervisor.h>
+#include <asm-xen/xenbus.h>
 #include <asm-xen/xen-public/xen.h>
 #include <asm-xen/xen-public/io/blkif.h>
 #include <asm-xen/xen-public/io/ring.h>
@@ -79,13 +80,20 @@
 #define DPRINTK_IOCTL(_f, _a...) ((void)0)
 #endif
 
-struct blkfront_info;
+struct xlbd_type_info
+{
+       int partn_shift;
+       int disks_per_major;
+       char *devname;
+       char *diskname;
+};
 
-struct xlbd_type_info {
-    int partn_shift;
-    int disks_per_major;
-    char *devname;
-    char *diskname;
+struct xlbd_major_info
+{
+       int major;
+       int index;
+       int usage;
+       struct xlbd_type_info *type;
 };
 
 /*
@@ -93,27 +101,27 @@
  * hang in private_data off the gendisk structure. We may end up
  * putting all kinds of interesting stuff here :-)
  */
-struct xlbd_major_info {
-    int major;
-    int index;
-    int usage;
-    struct xlbd_type_info *type;
+struct blkfront_info
+{
+       struct xenbus_device *xbdev;
+       /* We watch the backend */
+       struct xenbus_watch watch;
+       dev_t dev;
+       int vdevice;
+       blkif_vdev_t handle;
+       int connected;
+       char *backend;
+       int backend_id;
+       int grant_id;
+       blkif_front_ring_t ring;
+       unsigned int evtchn;
+       struct xlbd_major_info *mi;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+       request_queue_t *rq;
+#endif
+       struct work_struct work;
+       struct gnttab_free_callback callback;
 };
-
-struct xlbd_disk_info {
-    int xd_device;
-    blkif_vdev_t handle;
-    struct xlbd_major_info *mi;
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-    struct xlbd_disk_info  *next_waiting;
-    request_queue_t        *rq;
-#endif
-    struct blkfront_info *info;
-};
-
-typedef struct xen_block {
-    int usage;
-} xen_block_t;
 
 extern spinlock_t blkif_io_lock;
 
@@ -126,7 +134,7 @@
 extern void do_blkif_request (request_queue_t *rq); 
 
 /* Virtual block-device subsystem. */
-int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle,
+int xlvbd_add(blkif_sector_t capacity, int device,
              u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
-void xlvbd_del(blkif_vdev_t handle);
+void xlvbd_del(struct blkfront_info *info);
 #endif /* __XEN_DRIVERS_BLOCK_H__ */
diff -r 6078dc5f7ea1 -r 827a3c3524b3 linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c   Mon Aug 22 20:57:26 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/vbd.c   Mon Aug 22 20:59:00 2005
@@ -43,46 +43,37 @@
 #define NUM_SCSI_MAJORS 9
 #define NUM_VBD_MAJORS 1
 
-struct lvdisk
-{
-    blkif_sector_t capacity; /*  0: Size in terms of 512-byte sectors.   */
-    blkif_vdev_t   handle;   /*  8: Device number (opaque 16 bit value). */
-    u16            info;
-    dev_t          dev;
-    struct list_head list;
+static struct xlbd_type_info xlbd_ide_type = {
+       .partn_shift = 6,
+       .disks_per_major = 2,
+       .devname = "ide",
+       .diskname = "hd",
 };
 
-static struct xlbd_type_info xlbd_ide_type = {
-    .partn_shift = 6,
-    .disks_per_major = 2,
-    .devname = "ide",
-    .diskname = "hd",
+static struct xlbd_type_info xlbd_scsi_type = {
+       .partn_shift = 4,
+       .disks_per_major = 16,
+       .devname = "sd",
+       .diskname = "sd",
 };
 
-static struct xlbd_type_info xlbd_scsi_type = {
-    .partn_shift = 4,
-    .disks_per_major = 16,
-    .devname = "sd",
-    .diskname = "sd",
+static struct xlbd_type_info xlbd_vbd_type = {
+       .partn_shift = 4,
+       .disks_per_major = 16,
+       .devname = "xvd",
+       .diskname = "xvd",
 };
 
-static struct xlbd_type_info xlbd_vbd_type = {
-    .partn_shift = 4,
-    .disks_per_major = 16,
-    .devname = "xvd",
-    .diskname = "xvd",
-};
-
 static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
-                                         NUM_VBD_MAJORS];
-
-#define XLBD_MAJOR_IDE_START    0
-#define XLBD_MAJOR_SCSI_START   (NUM_IDE_MAJORS)
-#define XLBD_MAJOR_VBD_START    (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
-
-#define XLBD_MAJOR_IDE_RANGE    XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
-#define XLBD_MAJOR_SCSI_RANGE   XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
-#define XLBD_MAJOR_VBD_RANGE    XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
+                                         NUM_VBD_MAJORS];
+
+#define XLBD_MAJOR_IDE_START   0
+#define XLBD_MAJOR_SCSI_START  (NUM_IDE_MAJORS)
+#define XLBD_MAJOR_VBD_START   (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
+
+#define XLBD_MAJOR_IDE_RANGE   XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
+#define XLBD_MAJOR_SCSI_RANGE  XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
+#define XLBD_MAJOR_VBD_RANGE   XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
 
 /* Information about our VBDs. */
 #define MAX_VBDS 64
@@ -91,279 +82,233 @@
 #define MAJOR_XEN(dev) ((dev)>>8)
 #define MINOR_XEN(dev) ((dev) & 0xff)
 
-static struct block_device_operations xlvbd_block_fops = 
-{
-    .owner  = THIS_MODULE,
-    .open  = blkif_open,
-    .release = blkif_release,
-    .ioctl  = blkif_ioctl,
+static struct block_device_operations xlvbd_block_fops =
+{
+       .owner = THIS_MODULE,
+       .open = blkif_open,
+       .release = blkif_release,
+       .ioctl  = blkif_ioctl,
 };
 
 spinlock_t blkif_io_lock = SPIN_LOCK_UNLOCKED;
 
-static struct lvdisk *xlvbd_device_alloc(void)
-{
-    struct lvdisk *disk;
-
-    disk = kmalloc(sizeof(*disk), GFP_KERNEL);
-    if (disk != NULL) {
-        memset(disk, 0, sizeof(*disk));
-        INIT_LIST_HEAD(&disk->list);
-    }
-    return disk;
-}
-
-static void xlvbd_device_free(struct lvdisk *disk)
-{
-    list_del(&disk->list);
-    kfree(disk);
-}
-
-static struct xlbd_major_info *xlbd_alloc_major_info(
-    int major, int minor, int index)
-{
-    struct xlbd_major_info *ptr;
-
-    ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
-    if (ptr == NULL)
-        return NULL;
-
-    memset(ptr, 0, sizeof(struct xlbd_major_info));
-
-    ptr->major = major;
-
-    switch (index) {
-    case XLBD_MAJOR_IDE_RANGE:
-        ptr->type = &xlbd_ide_type;
-        ptr->index = index - XLBD_MAJOR_IDE_START;
-        break;
-    case XLBD_MAJOR_SCSI_RANGE:
-        ptr->type = &xlbd_scsi_type;
-        ptr->index = index - XLBD_MAJOR_SCSI_START;
-        break;
-    case XLBD_MAJOR_VBD_RANGE:
-        ptr->type = &xlbd_vbd_type;
-        ptr->index = index - XLBD_MAJOR_VBD_START;
-        break;
-    }
-    
-    printk("Registering block device major %i\n", ptr->major);
-    if (register_blkdev(ptr->major, ptr->type->devname)) {
-        WPRINTK("can't get major %d with name %s\n",
-                ptr->major, ptr->type->devname);
-        kfree(ptr);
-        return NULL;
-    }
-
-    devfs_mk_dir(ptr->type->devname);
-    major_info[index] = ptr;
-    return ptr;
-}
-
-static struct xlbd_major_info *xlbd_get_major_info(int device)
-{
-    int major, minor, index;
-
-    major = MAJOR_XEN(device);
-    minor = MINOR_XEN(device);
-
-    switch (major) {
-    case IDE0_MAJOR: index = 0; break;
-    case IDE1_MAJOR: index = 1; break;
-    case IDE2_MAJOR: index = 2; break;
-    case IDE3_MAJOR: index = 3; break;
-    case IDE4_MAJOR: index = 4; break;
-    case IDE5_MAJOR: index = 5; break;
-    case IDE6_MAJOR: index = 6; break;
-    case IDE7_MAJOR: index = 7; break;
-    case IDE8_MAJOR: index = 8; break;
-    case IDE9_MAJOR: index = 9; break;
-    case SCSI_DISK0_MAJOR: index = 10; break;
-    case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
-        index = 11 + major - SCSI_DISK1_MAJOR;
-        break;
-    case SCSI_CDROM_MAJOR: index = 18; break;
-    default: index = 19; break;
-    }
-
-    return ((major_info[index] != NULL) ? major_info[index] :
-            xlbd_alloc_major_info(major, minor, index));
-}
-
-static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
-{
-    request_queue_t *rq;
-
-    rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
-    if (rq == NULL)
-        return -1;
-
-    elevator_init(rq, "noop");
-
-    /* Hard sector size and max sectors impersonate the equiv. hardware. */
-    blk_queue_hardsect_size(rq, sector_size);
-    blk_queue_max_sectors(rq, 512);
-
-    /* Each segment in a request is up to an aligned page in size. */
-    blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
-    blk_queue_max_segment_size(rq, PAGE_SIZE);
-
-    /* Ensure a merged request will fit in a single I/O ring slot. */
-    blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-    blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
-
-    /* Make sure buffer addresses are sector-aligned. */
-    blk_queue_dma_alignment(rq, 511);
-
-    gd->queue = rq;
-
-    return 0;
-}
-
-static struct gendisk *xlvbd_alloc_gendisk(
-    struct xlbd_major_info *mi, int minor, blkif_sector_t capacity,
-    int device, blkif_vdev_t handle, u16 vdisk_info, u16 sector_size,
-    struct blkfront_info *info)
-{
-    struct gendisk *gd;
-    struct xlbd_disk_info *di;
-    int nr_minors = 1;
-
-    di = kmalloc(sizeof(struct xlbd_disk_info), GFP_KERNEL);
-    if (di == NULL)
-        return NULL;
-    memset(di, 0, sizeof(*di));
-    di->mi = mi;
-    di->xd_device = device;
-    di->handle = handle;
-    di->info = info;
-
-    if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
-        nr_minors = 1 << mi->type->partn_shift;
-
-    gd = alloc_disk(nr_minors);
-    if (gd == NULL)
-        goto out;
-
-    if (nr_minors > 1)
-        sprintf(gd->disk_name, "%s%c", mi->type->diskname,
-                'a' + mi->index * mi->type->disks_per_major +
-                    (minor >> mi->type->partn_shift));
-    else
-        sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
-                'a' + mi->index * mi->type->disks_per_major +
-                (minor >> mi->type->partn_shift),
-                minor & ((1 << mi->type->partn_shift) - 1));
-
-    gd->major = mi->major;
-    gd->first_minor = minor;
-    gd->fops = &xlvbd_block_fops;
-    gd->private_data = di;
-    set_capacity(gd, capacity);
-
-    if (xlvbd_init_blk_queue(gd, sector_size)) {
-        del_gendisk(gd);
-        goto out;
-    }
-
-    di->rq = gd->queue;
-
-    if (vdisk_info & VDISK_READONLY)
-        set_disk_ro(gd, 1);
-
-    if (vdisk_info & VDISK_REMOVABLE)
-        gd->flags |= GENHD_FL_REMOVABLE;
-
-    if (vdisk_info & VDISK_CDROM)
-        gd->flags |= GENHD_FL_CD;
-
-    add_disk(gd);
-
-    return gd;
-
-out:
-    kfree(di);
-    return NULL;
-}
-
-int xlvbd_add(blkif_sector_t capacity, int device, blkif_vdev_t handle,
-             u16 vdisk_info, u16 sector_size, struct blkfront_info *info)
-{
-    struct lvdisk *new;
-    struct block_device *bd;
-    struct gendisk *gd;
-    struct xlbd_major_info *mi;
-
-    mi = xlbd_get_major_info(device);
-    if (mi == NULL)
-        return -EPERM;
-
-    new = xlvbd_device_alloc();
-    if (new == NULL)
-        return -ENOMEM;
-    new->capacity = capacity;
-    new->info = vdisk_info;
-    new->handle = handle;
-    new->dev = MKDEV(MAJOR_XEN(device), MINOR_XEN(device));
-
-    bd = bdget(new->dev);
-    if (bd == NULL)
-        goto out;
-    
-    gd = xlvbd_alloc_gendisk(mi, MINOR_XEN(device), capacity, device, handle,
-                            vdisk_info, sector_size, info);
-    if (gd == NULL)
-        goto out_bd;
-
-    list_add(&new->list, &vbds_list);
-out_bd:
-    bdput(bd);
-out:
-    return 0;
-}
-
-static int xlvbd_device_del(struct lvdisk *disk)
-{
-    struct block_device *bd;
-    struct gendisk *gd;
-    struct xlbd_disk_info *di;
-    int ret = 0, unused;
-    request_queue_t *rq;
-
-    bd = bdget(disk->dev);
-    if (bd == NULL)
-        return -1;
-
-    gd = get_gendisk(disk->dev, &unused);
-    di = gd->private_data;
-
-#if 0 /* This is wrong: hda and hdb share same major, for example. */
-    if (di->mi->usage != 0) {
-        WPRINTK("disk removal failed: used [dev=%x]\n", disk->dev);
-        ret = -1;
-        goto out;
-    }
-#endif
-
-    rq = gd->queue;
-    del_gendisk(gd);
-    put_disk(gd);
-    blk_cleanup_queue(rq);
-
-    xlvbd_device_free(disk);
-    bdput(bd);
-    return ret;
-}
-
-void xlvbd_del(blkif_vdev_t handle)
-{
-       struct lvdisk *i;
-
-       list_for_each_entry(i, &vbds_list, list) {
-               if (i->handle == handle) {
-                       xlvbd_device_del(i);
-                       return;
-               }
+static struct xlbd_major_info *
+xlbd_alloc_major_info(int major, int minor, int index)
+{
+       struct xlbd_major_info *ptr;
+
+       ptr = kmalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
+       if (ptr == NULL)
+               return NULL;
+
+       memset(ptr, 0, sizeof(struct xlbd_major_info));
+
+       ptr->major = major;
+
+       switch (index) {
+       case XLBD_MAJOR_IDE_RANGE:
+               ptr->type = &xlbd_ide_type;
+               ptr->index = index - XLBD_MAJOR_IDE_START;
+               break;
+       case XLBD_MAJOR_SCSI_RANGE:
+               ptr->type = &xlbd_scsi_type;
+               ptr->index = index - XLBD_MAJOR_SCSI_START;
+               break;
+       case XLBD_MAJOR_VBD_RANGE:
+               ptr->type = &xlbd_vbd_type;
+               ptr->index = index - XLBD_MAJOR_VBD_START;
+               break;
        }
-       BUG();
-}
+
+       printk("Registering block device major %i\n", ptr->major);
+       if (register_blkdev(ptr->major, ptr->type->devname)) {
+               WPRINTK("can't get major %d with name %s\n",
+                       ptr->major, ptr->type->devname);
+               kfree(ptr);
+               return NULL;
+       }
+
+       devfs_mk_dir(ptr->type->devname);
+       major_info[index] = ptr;
+       return ptr;
+}
+
+static struct xlbd_major_info *
+xlbd_get_major_info(int vdevice)
+{
+       struct xlbd_major_info *mi;
+       int major, minor, index;
+
+       major = MAJOR_XEN(vdevice);
+       minor = MINOR_XEN(vdevice);
+
+       switch (major) {
+       case IDE0_MAJOR: index = 0; break;
+       case IDE1_MAJOR: index = 1; break;
+       case IDE2_MAJOR: index = 2; break;
+       case IDE3_MAJOR: index = 3; break;
+       case IDE4_MAJOR: index = 4; break;
+       case IDE5_MAJOR: index = 5; break;
+       case IDE6_MAJOR: index = 6; break;
+       case IDE7_MAJOR: index = 7; break;
+       case IDE8_MAJOR: index = 8; break;
+       case IDE9_MAJOR: index = 9; break;
+       case SCSI_DISK0_MAJOR: index = 10; break;
+       case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
+               index = 11 + major - SCSI_DISK1_MAJOR;
+               break;
+       case SCSI_CDROM_MAJOR: index = 18; break;
+       default: index = 19; break;
+       }
+
+       mi = ((major_info[index] != NULL) ? major_info[index] :
+             xlbd_alloc_major_info(major, minor, index));
+       mi->usage++;
+       return mi;
+}
+
+static void
+xlbd_put_major_info(struct xlbd_major_info *mi)
+{
+       mi->usage--;
+       /* XXX: release major if 0 */
+}
+
+static int
+xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
+{
+       request_queue_t *rq;
+
+       rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+       if (rq == NULL)
+               return -1;
+
+       elevator_init(rq, "noop");
+
+       /* Hard sector size and max sectors impersonate the equiv. hardware. */
+       blk_queue_hardsect_size(rq, sector_size);
+       blk_queue_max_sectors(rq, 512);
+
+       /* Each segment in a request is up to an aligned page in size. */
+       blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
+       blk_queue_max_segment_size(rq, PAGE_SIZE);
+
+       /* Ensure a merged request will fit in a single I/O ring slot. */
+       blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+       blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
+
+       /* Make sure buffer addresses are sector-aligned. */
+       blk_queue_dma_alignment(rq, 511);
+
+       gd->queue = rq;
+
+       return 0;
+}
+
+static int
+xlvbd_alloc_gendisk(int minor, blkif_sector_t capacity, int vdevice,
+                   u16 vdisk_info, u16 sector_size,
+                   struct blkfront_info *info)
+{
+       struct gendisk *gd;
+       struct xlbd_major_info *mi;
+       int nr_minors = 1;
+       int err = -ENODEV;
+
+       mi = xlbd_get_major_info(vdevice);
+       if (mi == NULL)
+               goto out;
+       info->mi = mi;
+
+       if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
+               nr_minors = 1 << mi->type->partn_shift;
+
+       gd = alloc_disk(nr_minors);
+       if (gd == NULL)
+               goto out;
+
+       if (nr_minors > 1)
+               sprintf(gd->disk_name, "%s%c", mi->type->diskname,
+                       'a' + mi->index * mi->type->disks_per_major +
+                       (minor >> mi->type->partn_shift));
+       else
+               sprintf(gd->disk_name, "%s%c%d", mi->type->diskname,
+                       'a' + mi->index * mi->type->disks_per_major +
+                       (minor >> mi->type->partn_shift),
+                       minor & ((1 << mi->type->partn_shift) - 1));
+
+       gd->major = mi->major;
+       gd->first_minor = minor;
+       gd->fops = &xlvbd_block_fops;
+       gd->private_data = info;
+       set_capacity(gd, capacity);
+
+       if (xlvbd_init_blk_queue(gd, sector_size)) {
+               del_gendisk(gd);
+               goto out;
+       }
+
+       info->rq = gd->queue;
+
+       if (vdisk_info & VDISK_READONLY)
+               set_disk_ro(gd, 1);
+
+       if (vdisk_info & VDISK_REMOVABLE)
+               gd->flags |= GENHD_FL_REMOVABLE;
+
+       if (vdisk_info & VDISK_CDROM)
+               gd->flags |= GENHD_FL_CD;
+
+       add_disk(gd);
+
+       return 0;
+
+ out:
+       if (mi)
+               xlbd_put_major_info(mi);
+       return err;
+}
+
+int
+xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
+         u16 sector_size, struct blkfront_info *info)
+{
+       struct block_device *bd;
+       int err = 0;
+
+       info->dev = MKDEV(MAJOR_XEN(vdevice), MINOR_XEN(vdevice));
+
+       bd = bdget(info->dev);
+       if (bd == NULL)
+               return -ENODEV;
+
+       err = xlvbd_alloc_gendisk(MINOR_XEN(vdevice), capacity, vdevice,
+                                 vdisk_info, sector_size, info);
+
+       bdput(bd);
+       return err;
+}
+
+void
+xlvbd_del(struct blkfront_info *info)
+{
+       struct block_device *bd;
+       struct gendisk *gd;
+       int unused;
+       request_queue_t *rq;
+
+       bd = bdget(info->dev);
+       if (bd == NULL)
+               return;
+
+       gd = get_gendisk(info->dev, &unused);
+       rq = gd->queue;
+
+       del_gendisk(gd);
+       put_disk(gd);
+       xlbd_put_major_info(info->mi);
+       info->mi = NULL;
+       blk_cleanup_queue(rq);
+
+       bdput(bd);
+}
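
Taken together, the block.h and vbd.c hunks replace the old xlbd_disk_info / lvdisk pair with a single per-device blkfront_info: the gendisk's private_data now points straight at it, xlvbd_add()/xlvbd_del() take the info pointer (or its dev_t) instead of a handle looked up in a global list, and per-major bookkeeping moves into the usage-counted xlbd_major_info. A condensed view of the resulting structure (comments added here; the full definition is in block.h above):

    struct blkfront_info
    {
        struct xenbus_device *xbdev;   /* xenbus handle (formerly 'dev') */
        struct xenbus_watch watch;     /* watch on the backend directory */
        dev_t dev;                     /* Linux device number for bdget() */
        int vdevice;                   /* Xen virtual device id */
        blkif_vdev_t handle;
        int connected;                 /* BLKIF_STATE_* is now per device */
        char *backend;
        int backend_id;
        int grant_id;                  /* grant ref for the shared ring page */
        blkif_front_ring_t ring;
        unsigned int evtchn;
        struct xlbd_major_info *mi;    /* shared, usage-counted per major */
        request_queue_t *rq;           /* this device's request queue (2.6 only) */
        struct work_struct work;       /* queue-restart work item */
        struct gnttab_free_callback callback;
    };

One smaller change worth noting: the shared ring page is now granted with gnttab_grant_foreign_access(), whose -ENOSPC return is handled explicitly (the ring page is freed and the error reported via xenbus_dev_error()), instead of claiming a reference from the old global pool under an ASSERT.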
