[Xen-changelog] [linux-2.6.18-xen] merge with linux-2.6.18-xen.hg



# HG changeset patch
# User Alex Williamson <alex.williamson@xxxxxx>
# Date 1203006901 25200
# Node ID 31d71546a6442785e55d9ba2b94796701cc40276
# Parent  482aa2c39049a5bf2d17dfbee348ad4489238913
# Parent  d9eaaff9a9bdb37b1aec92e071dc47e038bbd2c5
merge with linux-2.6.18-xen.hg
---
 .hgignore                                  |    3 
 .hgtags                                    |    1 
 arch/i386/lib/Makefile                     |    1 
 arch/i386/lib/scrub.c                      |   21 ++++++
 arch/i386/mm/hypervisor.c                  |   94 ++++++++++++++++-------------
 arch/x86_64/lib/Makefile                   |    1 
 arch/x86_64/lib/scrub.c                    |    1 
 block/elevator.c                           |   10 +++
 block/ll_rw_blk.c                          |   54 ++++++++++------
 drivers/xen/balloon/balloon.c              |   32 ++++++---
 drivers/xen/blkback/blkback.c              |   16 ++++
 drivers/xen/fbfront/xenkbd.c               |    4 -
 drivers/xen/netfront/accel.c               |   34 ++--------
 fs/bio.c                                   |    3 
 fs/splice.c                                |    3 
 include/asm-i386/mach-xen/asm/hypervisor.h |    2 
 include/linux/bio.h                        |   19 +++++
 include/linux/blkdev.h                     |    2 
 mm/highmem.c                               |    6 +
 19 files changed, 205 insertions(+), 102 deletions(-)

diff -r 482aa2c39049 -r 31d71546a644 .hgignore
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/.hgignore Thu Feb 14 09:35:01 2008 -0700
@@ -0,0 +1,3 @@
+.*\.orig$
+.*\.rej$
+\.valid-src$
\ No newline at end of file
diff -r 482aa2c39049 -r 31d71546a644 .hgtags
--- a/.hgtags   Thu Feb 07 11:09:20 2008 -0700
+++ b/.hgtags   Thu Feb 14 09:35:01 2008 -0700
@@ -1,1 +1,2 @@ 831230e53067cb45d27b07d037b2e907b663c2db
 831230e53067cb45d27b07d037b2e907b663c2db v2.6.18
+08e85e79c65d0316bfda5e77e8a0dc7ab9ca181a xen-3.2.0
diff -r 482aa2c39049 -r 31d71546a644 arch/i386/lib/Makefile
--- a/arch/i386/lib/Makefile    Thu Feb 07 11:09:20 2008 -0700
+++ b/arch/i386/lib/Makefile    Thu Feb 14 09:35:01 2008 -0700
@@ -7,3 +7,4 @@ lib-y = checksum.o delay.o usercopy.o ge
        bitops.o
 
 lib-$(CONFIG_X86_USE_3DNOW) += mmx.o
+lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
diff -r 482aa2c39049 -r 31d71546a644 arch/i386/lib/scrub.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/arch/i386/lib/scrub.c     Thu Feb 14 09:35:01 2008 -0700
@@ -0,0 +1,21 @@
+#include <asm/cpufeature.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+
+void scrub_pages(void *v, unsigned int count)
+{
+       if (likely(cpu_has_xmm2)) {
+               unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
+
+               for (; n--; v += sizeof(long) * 4)
+                       asm("movnti %1,(%0)\n\t"
+                           "movnti %1,%c2(%0)\n\t"
+                           "movnti %1,2*%c2(%0)\n\t"
+                           "movnti %1,3*%c2(%0)\n\t"
+                           : : "r" (v), "r" (0L), "i" (sizeof(long))
+                           : "memory");
+               asm volatile("sfence" : : : "memory");
+       } else
+               for (; count--; v += PAGE_SIZE)
+                       clear_page(v);
+}
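
The fast path above streams zeros to memory with movnti, bypassing the cache so a bulk scrub does not evict the working set; the trailing sfence makes the weakly ordered stores visible before the pages are reused. A minimal userspace analog of the same technique, assuming SSE2 intrinsics and 4 KiB pages (all names here are illustrative, not from the patch):

    #include <emmintrin.h>  /* _mm_stream_si32 compiles to movnti */
    #include <stdlib.h>

    #define PAGE_SIZE 4096  /* assumption: x86 page size */

    /* Zero `count` pages with non-temporal stores, bypassing the cache. */
    static void scrub_pages_nt(void *v, unsigned int count)
    {
            int *p = v;
            size_t i, n = (size_t)count * PAGE_SIZE / sizeof(int);

            for (i = 0; i < n; i += 4) {    /* 4x unrolled, like the asm */
                    _mm_stream_si32(p + i, 0);
                    _mm_stream_si32(p + i + 1, 0);
                    _mm_stream_si32(p + i + 2, 0);
                    _mm_stream_si32(p + i + 3, 0);
            }
            _mm_sfence();   /* order the streaming stores before reuse */
    }

    int main(void)
    {
            void *buf = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);

            if (buf)
                    scrub_pages_nt(buf, 2);
            free(buf);
            return 0;
    }
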
diff -r 482aa2c39049 -r 31d71546a644 arch/i386/mm/hypervisor.c
--- a/arch/i386/mm/hypervisor.c Thu Feb 07 11:09:20 2008 -0700
+++ b/arch/i386/mm/hypervisor.c Thu Feb 14 09:35:01 2008 -0700
@@ -280,7 +280,7 @@ int xen_create_contiguous_region(
        set_xen_guest_handle(exchange.in.extent_start, in_frames);
        set_xen_guest_handle(exchange.out.extent_start, &out_frame);
 
-       scrub_pages(vstart, 1 << order);
+       scrub_pages((void *)vstart, 1 << order);
 
        balloon_lock(flags);
 
@@ -373,7 +373,7 @@ void xen_destroy_contiguous_region(unsig
        set_xen_guest_handle(exchange.in.extent_start, &in_frame);
        set_xen_guest_handle(exchange.out.extent_start, out_frames);
 
-       scrub_pages(vstart, 1 << order);
+       scrub_pages((void *)vstart, 1 << order);
 
        balloon_lock(flags);
 
@@ -434,19 +434,17 @@ int xen_limit_pages_to_max_mfn(
 {
        unsigned long flags, frame;
        unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
-       void *v;
        struct page *page;
-       unsigned int i, nr_mcl;
+       unsigned int i, n, nr_mcl;
        int rc, success;
+       DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
 
        struct xen_memory_exchange exchange = {
                .in = {
-                       .nr_extents   = 1UL << order,
                        .extent_order = 0,
                        .domid        = DOMID_SELF
                },
                .out = {
-                       .nr_extents   = 1UL << order,
                        .extent_order = 0,
                        .address_bits = address_bits,
                        .domid        = DOMID_SELF
@@ -459,80 +457,98 @@ int xen_limit_pages_to_max_mfn(
        if (unlikely(order > MAX_CONTIG_ORDER))
                return -ENOMEM;
 
+       bitmap_zero(limit_map, 1U << order);
        set_xen_guest_handle(exchange.in.extent_start, in_frames);
        set_xen_guest_handle(exchange.out.extent_start, out_frames);
 
        /* 0. Scrub the pages. */
-       for ( i = 0 ; i < 1UL<<order ; i++ ) {
+       for (i = 0, n = 0; i < 1U<<order ; i++) {
                page = &pages[i];
-
-               if (!PageHighMem(page)) {
-                       v = page_address(page);
-                       scrub_pages(v, 1);
-               } else {
-                       v = kmap(page);
-                       scrub_pages(v, 1);
+               if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
+                       continue;
+               __set_bit(i, limit_map);
+
+               if (!PageHighMem(page))
+                       scrub_pages(page_address(page), 1);
+#ifdef CONFIG_XEN_SCRUB_PAGES
+               else {
+                       scrub_pages(kmap(page), 1);
                        kunmap(page);
+                       ++n;
                }
-       }
-
-       kmap_flush_unused();
+#endif
+       }
+       if (bitmap_empty(limit_map, 1U << order))
+               return 0;
+
+       if (n)
+               kmap_flush_unused();
 
        balloon_lock(flags);
 
        /* 1. Zap current PTEs (if any), remembering MFNs. */
-       for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
+       for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
+               if(!test_bit(i, limit_map))
+                       continue;
                page = &pages[i];
 
-               out_frames[i] = page_to_pfn(page);
-               in_frames[i] = pfn_to_mfn(out_frames[i]);
+               out_frames[n] = page_to_pfn(page);
+               in_frames[n] = pfn_to_mfn(out_frames[n]);
 
                if (!PageHighMem(page))
                        MULTI_update_va_mapping(cr_mcl + nr_mcl++,
                                                (unsigned long)page_address(page),
                                                __pte_ma(0), 0);
 
-               set_phys_to_machine(out_frames[i], INVALID_P2M_ENTRY);
-       }
-       if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+               set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
+               ++n;
+       }
+       if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
                BUG();
 
        /* 2. Get new memory below the required limit. */
+       exchange.in.nr_extents = n;
+       exchange.out.nr_extents = n;
        rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
-       success = (exchange.nr_exchanged == (1UL << order));
+       success = (exchange.nr_exchanged == n);
        BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
        BUG_ON(success && (rc != 0));
 #if CONFIG_XEN_COMPAT <= 0x030002
        if (unlikely(rc == -ENOSYS)) {
                /* Compatibility when XENMEM_exchange is unsupported. */
                if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-                                        &exchange.in) != (1UL << order))
+                                        &exchange.in) != n)
                        BUG();
-               success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
-                                               &exchange.out) != (1UL <<order));
+               if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
+                                        &exchange.out) != n)
+                       BUG();
+               success = 1;
        }
 #endif
 
        /* 3. Map the new pages in place of old pages. */
-       for (i = 0, nr_mcl = 0; i < (1U<<order); i++) {
-               unsigned long pfn;
+       for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
+               if(!test_bit(i, limit_map))
+                       continue;
                page = &pages[i];
-               pfn = page_to_pfn(page);
-
-               frame = success ? out_frames[i] : in_frames[i];
+
+               frame = success ? out_frames[n] : in_frames[n];
 
                if (!PageHighMem(page))
                        MULTI_update_va_mapping(cr_mcl + nr_mcl++,
                                                (unsigned long)page_address(page),
                                                pfn_pte_ma(frame, PAGE_KERNEL), 0);
 
-               set_phys_to_machine(pfn, frame);
-       }
-       cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
-                                                       ? UVMF_TLB_FLUSH|UVMF_ALL
-                                                       : UVMF_INVLPG|UVMF_ALL;
-       if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
-               BUG();
+               set_phys_to_machine(page_to_pfn(page), frame);
+               ++n;
+       }
+       if (nr_mcl) {
+               cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
+                                                               ? UVMF_TLB_FLUSH|UVMF_ALL
+                                                               : UVMF_INVLPG|UVMF_ALL;
+               if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
+                       BUG();
+       }
 
        balloon_unlock(flags);
 
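The rewrite above changes xen_limit_pages_to_max_mfn() from exchanging every page in the 2^order block to exchanging only the pages whose MFN already sits above the address limit: a bitmap records which pages qualify, and a second counter n packs just those pages densely into in_frames[]/out_frames[] so the hypercall never sees the rest. A stripped-down sketch of that sparse-index/dense-index pattern (illustrative names and toy values, not kernel APIs):

    #include <stdio.h>

    #define NR_ITEMS 8

    int main(void)
    {
            unsigned long limit_map = 0;            /* stand-in for DECLARE_BITMAP() */
            unsigned int mfns[NR_ITEMS] = { 3, 90, 7, 150, 200, 12, 99, 64 };
            unsigned int packed[NR_ITEMS];
            unsigned int i, n, max_mfn = 80;        /* assumption: toy limit */

            /* Pass 1: flag only the items that exceed the limit. */
            for (i = 0; i < NR_ITEMS; i++)
                    if (mfns[i] > max_mfn)
                            limit_map |= 1UL << i;  /* __set_bit() analog */

            /* Pass 2: walk the sparse index i, pack results at the dense
             * index n -- the same i/n split the patch uses so only the
             * flagged pages are handed to the exchange. */
            for (i = 0, n = 0; i < NR_ITEMS; i++) {
                    if (!(limit_map & (1UL << i)))  /* test_bit() analog */
                            continue;
                    packed[n++] = mfns[i];
            }

            printf("%u of %u items need exchanging\n", n, NR_ITEMS);
            return 0;
    }
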
diff -r 482aa2c39049 -r 31d71546a644 arch/x86_64/lib/Makefile
--- a/arch/x86_64/lib/Makefile  Thu Feb 07 11:09:20 2008 -0700
+++ b/arch/x86_64/lib/Makefile  Thu Feb 14 09:35:01 2008 -0700
@@ -10,3 +10,4 @@ lib-y := csum-partial.o csum-copy.o csum
        usercopy.o getuser.o putuser.o  \
        thunk.o clear_page.o copy_page.o bitstr.o bitops.o
 lib-y += memcpy.o memmove.o memset.o copy_user.o
+lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
diff -r 482aa2c39049 -r 31d71546a644 arch/x86_64/lib/scrub.c
--- /dev/null   Thu Jan 01 00:00:00 1970 +0000
+++ b/arch/x86_64/lib/scrub.c   Thu Feb 14 09:35:01 2008 -0700
@@ -0,0 +1,1 @@
+#include "../../i386/lib/scrub.c"
diff -r 482aa2c39049 -r 31d71546a644 block/elevator.c
--- a/block/elevator.c  Thu Feb 07 11:09:20 2008 -0700
+++ b/block/elevator.c  Thu Feb 14 09:35:01 2008 -0700
@@ -493,6 +493,16 @@ struct request *elv_next_request(request
        int ret;
 
        while ((rq = __elv_next_request(q)) != NULL) {
+               /*
+                * Kill the empty barrier place holder, the driver must
+                * not ever see it.
+                */
+               if (blk_empty_barrier(rq)) {
+                       blkdev_dequeue_request(rq);
+                       end_that_request_chunk(rq, 1, 0);
+                       end_that_request_last(rq, 1);
+                       continue;
+               }
                if (!(rq->flags & REQ_STARTED)) {
                        elevator_t *e = q->elevator;
 
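The hunk above makes elv_next_request() complete empty-barrier placeholders itself, so a driver never sees a request with no data. The same filter-at-dispatch shape, sketched over a toy queue (hypothetical types; end_that_request_chunk()/end_that_request_last() are the real completion calls the patch uses):

    #include <stdio.h>
    #include <stddef.h>

    struct toy_req {
            int placeholder;        /* analog of blk_empty_barrier(rq) */
            int id;
            struct toy_req *next;
    };

    /* Return the next request a driver may see, completing any
     * placeholders in-line instead of handing them down. */
    static struct toy_req *next_request(struct toy_req **head)
    {
            struct toy_req *rq;

            while ((rq = *head) != NULL) {
                    if (!rq->placeholder)
                            return rq;
                    *head = rq->next;       /* blkdev_dequeue_request() analog */
                    printf("completed placeholder %d\n", rq->id);
            }
            return NULL;
    }

    int main(void)
    {
            struct toy_req c = { 0, 3, NULL };
            struct toy_req b = { 0, 2, &c };
            struct toy_req a = { 1, 1, &b };        /* empty barrier at the head */
            struct toy_req *q = &a;

            printf("driver sees %d\n", next_request(&q)->id);
            return 0;
    }
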
diff -r 482aa2c39049 -r 31d71546a644 block/ll_rw_blk.c
--- a/block/ll_rw_blk.c Thu Feb 07 11:09:20 2008 -0700
+++ b/block/ll_rw_blk.c Thu Feb 14 09:35:01 2008 -0700
@@ -483,9 +483,12 @@ static inline struct request *start_orde
         * Queue ordered sequence.  As we stack them at the head, we
         * need to queue in reverse order.  Note that we rely on that
         * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-        * request gets inbetween ordered sequence.
-        */
-       if (q->ordered & QUEUE_ORDERED_POSTFLUSH)
+        * request gets inbetween ordered sequence. If this request is
+        * an empty barrier, we don't need to do a postflush ever since
+        * there will be no data written between the pre and post flush.
+        * Hence a single flush will suffice.
+        */
+       if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
                queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
        else
                q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
@@ -2967,7 +2970,7 @@ static inline void blk_partition_remap(s
 {
        struct block_device *bdev = bio->bi_bdev;
 
-       if (bdev != bdev->bd_contains) {
+       if (bio_sectors(bio) && bdev != bdev->bd_contains) {
                struct hd_struct *p = bdev->bd_part;
                const int rw = bio_data_dir(bio);
 
@@ -3028,7 +3031,7 @@ void generic_make_request(struct bio *bi
        might_sleep();
        /* Test device or partition size, when known. */
        maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-       if (maxsector) {
+       if (maxsector && nr_sectors) {
                sector_t sector = bio->bi_sector;
 
                if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
@@ -3094,7 +3097,7 @@ end_io:
                old_dev = bio->bi_bdev->bd_dev;
 
                maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
-               if (maxsector) {
+               if (maxsector && nr_sectors) {
                        sector_t sector = bio->bi_sector;
 
                        if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
@@ -3128,21 +3131,25 @@ void submit_bio(int rw, struct bio *bio)
 {
        int count = bio_sectors(bio);
 
-       BIO_BUG_ON(!bio->bi_size);
-       BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
-       if (rw & WRITE)
-               count_vm_events(PGPGOUT, count);
-       else
-               count_vm_events(PGPGIN, count);
-
-       if (unlikely(block_dump)) {
-               char b[BDEVNAME_SIZE];
-               printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
-                       current->comm, current->pid,
-                       (rw & WRITE) ? "WRITE" : "READ",
-                       (unsigned long long)bio->bi_sector,
-                       bdevname(bio->bi_bdev,b));
+
+       if (!bio_empty_barrier(bio)) {
+               BIO_BUG_ON(!bio->bi_size);
+               BIO_BUG_ON(!bio->bi_io_vec);
+
+               if (rw & WRITE)
+                       count_vm_events(PGPGOUT, count);
+               else
+                       count_vm_events(PGPGIN, count);
+
+               if (unlikely(block_dump)) {
+                       char b[BDEVNAME_SIZE];
+                       printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+                               current->comm, current->pid,
+                               (rw & WRITE) ? "WRITE" : "READ",
+                               (unsigned long long)bio->bi_sector,
+                               bdevname(bio->bi_bdev,b));
+               }
        }
 
        generic_make_request(bio);
@@ -3259,6 +3266,13 @@ static int __end_that_request_first(stru
        total_bytes = bio_nbytes = 0;
        while ((bio = req->bio) != NULL) {
                int nbytes;
+
+               /* For an empty barrier request, the low level driver must
+                * store a potential error location in ->sector. We pass
+                * that back up in ->bi_sector
+                */
+               if (blk_empty_barrier(req))
+                       bio->bi_sector = req->sector;
 
                if (nr_bytes >= bio->bi_size) {
                        req->bio = bio->bi_next;
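
These ll_rw_blk.c hunks all guard the data paths against "empty" barriers: a barrier that carries no payload exists purely to order earlier writes, so the size checks, the PGPGIN/PGPGOUT accounting, and the post-flush are skipped for it, and on completion the driver's error location in req->sector is passed back up through bi_sector. The predicate the patch adds later in include/linux/bio.h boils down to "barrier bit set and zero bytes"; a toy model of that test (simplified fields, not the real struct bio):

    #include <stdio.h>

    #define RW_BARRIER (1u << 0)    /* stand-in for BIO_RW_BARRIER */

    struct toy_bio {
            unsigned int rw;        /* request flags */
            unsigned int size;      /* payload bytes, 0 for a pure flush */
    };

    static int toy_empty_barrier(const struct toy_bio *b)
    {
            /* ordering-only request: barrier set, nothing to transfer */
            return (b->rw & RW_BARRIER) && !b->size;
    }

    int main(void)
    {
            struct toy_bio flush = { RW_BARRIER, 0 };
            struct toy_bio write = { RW_BARRIER, 4096 };

            printf("flush: %d, barrier write: %d\n",
                   toy_empty_barrier(&flush), toy_empty_barrier(&write));
            return 0;   /* prints "flush: 1, barrier write: 0" */
    }
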
diff -r 482aa2c39049 -r 31d71546a644 drivers/xen/balloon/balloon.c
--- a/drivers/xen/balloon/balloon.c     Thu Feb 07 11:09:20 2008 -0700
+++ b/drivers/xen/balloon/balloon.c     Thu Feb 14 09:35:01 2008 -0700
@@ -104,7 +104,7 @@ static struct timer_list balloon_timer;
 /* When ballooning out (allocating memory to return to Xen) we don't really 
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
-       (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
+       (GFP_HIGHUSER|__GFP_NOWARN|__GFP_NORETRY|__GFP_NOMEMALLOC|__GFP_COLD)
 
 #define PAGE_TO_LIST(p) (&(p)->lru)
 #define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
@@ -170,6 +170,17 @@ static struct page *balloon_next_page(st
        return LIST_TO_PAGE(next);
 }
 
+static inline void balloon_free_page(struct page *page)
+{
+#ifndef MODULE
+       if (put_page_testzero(page))
+               free_cold_page(page);
+#else
+       /* free_cold_page() is not being exported. */
+       __free_page(page);
+#endif
+}
+
 static void balloon_alarm(unsigned long unused)
 {
        schedule_work(&balloon_worker);
@@ -251,7 +262,7 @@ static int increase_reservation(unsigned
                /* Relinquish the page back to the allocator. */
                ClearPageReserved(page);
                init_page_count(page);
-               __free_page(page);
+               balloon_free_page(page);
        }
 
        bs.current_pages += nr_pages;
@@ -566,7 +577,8 @@ static int dealloc_pte_fn(
 
 struct page **alloc_empty_pages_and_pagevec(int nr_pages)
 {
-       unsigned long vaddr, flags;
+       unsigned long flags;
+       void *v;
        struct page *page, **pagevec;
        int i, ret;
 
@@ -575,13 +587,12 @@ struct page **alloc_empty_pages_and_page
                return NULL;
 
        for (i = 0; i < nr_pages; i++) {
-               page = pagevec[i] = alloc_page(GFP_KERNEL);
+               page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD);
                if (page == NULL)
                        goto err;
 
-               vaddr = (unsigned long)page_address(page);
-
-               scrub_pages(vaddr, 1);
+               v = page_address(page);
+               scrub_pages(v, 1);
 
                balloon_lock(flags);
 
@@ -599,8 +610,9 @@ struct page **alloc_empty_pages_and_page
                                ret = 0; /* success */
                } else {
 #ifdef CONFIG_XEN
-                       ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
-                                                 dealloc_pte_fn, NULL);
+                       ret = apply_to_page_range(&init_mm, (unsigned long)v,
+                                                 PAGE_SIZE, dealloc_pte_fn,
+                                                 NULL);
 #else
                        /* Cannot handle non-auto translate mode. */
                        ret = 1;
@@ -609,7 +621,7 @@ struct page **alloc_empty_pages_and_page
 
                if (ret != 0) {
                        balloon_unlock(flags);
-                       __free_page(page);
+                       balloon_free_page(page);
                        goto err;
                }
 
diff -r 482aa2c39049 -r 31d71546a644 drivers/xen/blkback/blkback.c
--- a/drivers/xen/blkback/blkback.c     Thu Feb 07 11:09:20 2008 -0700
+++ b/drivers/xen/blkback/blkback.c     Thu Feb 14 09:35:01 2008 -0700
@@ -407,7 +407,7 @@ static void dispatch_rw_block_io(blkif_t
 
        /* Check that number of segments is sane. */
        nseg = req->nr_segments;
-       if (unlikely(nseg == 0) || 
+       if (unlikely(nseg == 0 && operation != WRITE_BARRIER) || 
            unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
                DPRINTK("Bad number of segments in request (%d)\n", nseg);
                goto fail_response;
@@ -500,6 +500,18 @@ static void dispatch_rw_block_io(blkif_t
                preq.sector_number += seg[i].nsec;
        }
 
+       if (!bio) {
+               BUG_ON(operation != WRITE_BARRIER);
+               bio = biolist[nbio++] = bio_alloc(GFP_KERNEL, 0);
+               if (unlikely(bio == NULL))
+                       goto fail_put_bio;
+
+               bio->bi_bdev    = preq.bdev;
+               bio->bi_private = pending_req;
+               bio->bi_end_io  = end_block_io_op;
+               bio->bi_sector  = -1;
+       }
+
        plug_queue(blkif, bio);
        atomic_set(&pending_req->pendcnt, nbio);
        blkif_get(blkif);
@@ -509,7 +521,7 @@ static void dispatch_rw_block_io(blkif_t
 
        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
-       else if (operation == WRITE)
+       else if (operation == WRITE || operation == WRITE_BARRIER)
                blkif->st_wr_sect += preq.nr_sects;
 
        return;
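
The backend change has two halves: the segment-count sanity check now tolerates nseg == 0, but only for WRITE_BARRIER, and such a request gets a zero-segment bio (with bi_sector = -1 marking "no location") so the barrier still travels down the block layer. A compact sketch of the relaxed validation rule (toy enum and limit, not the blkif protocol headers):

    #include <stdio.h>

    enum toy_op { OP_READ, OP_WRITE, OP_WRITE_BARRIER };

    #define MAX_SEGMENTS 11         /* assumption: toy limit */

    /* Zero segments is only meaningful for a barrier: it orders prior
     * writes without carrying data. Everything else must have payload. */
    static int nseg_ok(enum toy_op op, unsigned int nseg)
    {
            if (nseg == 0 && op != OP_WRITE_BARRIER)
                    return 0;
            return nseg <= MAX_SEGMENTS;
    }

    int main(void)
    {
            printf("empty write: %d\n", nseg_ok(OP_WRITE, 0));           /* 0 */
            printf("empty barrier: %d\n", nseg_ok(OP_WRITE_BARRIER, 0)); /* 1 */
            return 0;
    }
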
diff -r 482aa2c39049 -r 31d71546a644 drivers/xen/fbfront/xenkbd.c
--- a/drivers/xen/fbfront/xenkbd.c      Thu Feb 07 11:09:20 2008 -0700
+++ b/drivers/xen/fbfront/xenkbd.c      Thu Feb 14 09:35:01 2008 -0700
@@ -53,7 +53,7 @@ static irqreturn_t input_handler(int rq,
        __u32 cons, prod;
 
        prod = page->in_prod;
-       if (prod == page->out_cons)
+       if (prod == page->in_cons)
                return IRQ_HANDLED;
        rmb();                  /* ensure we see ring contents up to prod */
        for (cons = page->in_cons; cons != prod; cons++) {
@@ -193,6 +193,8 @@ static int xenkbd_resume(struct xenbus_d
        struct xenkbd_info *info = dev->dev.driver_data;
 
        xenkbd_disconnect_backend(info);
+       info->page->in_cons = info->page->in_prod = 0;
+       info->page->out_cons = info->page->out_prod = 0;
        return xenkbd_connect_backend(dev, info);
 }
 
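Two fixes above: the emptiness test now compares the producer against the input ring's own consumer (in_cons, not out_cons), and resume resets both rings' indices before reconnecting. The consume loop follows the classic single-producer/single-consumer shape: snapshot prod, bail if it equals cons, read-barrier, then drain up to the snapshot. A userspace sketch of that shape (__atomic_thread_fence standing in for rmb(); the ring layout is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_SIZE 64            /* assumption: power-of-two ring */

    struct toy_ring {
            volatile uint32_t in_prod;      /* advanced by the producer */
            volatile uint32_t in_cons;      /* advanced by the consumer */
            int ring[RING_SIZE];
    };

    static unsigned int drain(struct toy_ring *r)
    {
            uint32_t cons, prod = r->in_prod;
            unsigned int handled = 0;

            if (prod == r->in_cons)         /* this ring's consumer, not another's */
                    return 0;
            __atomic_thread_fence(__ATOMIC_ACQUIRE);  /* rmb(): see contents up to prod */
            for (cons = r->in_cons; cons != prod; cons++, handled++)
                    printf("event %d\n", r->ring[cons % RING_SIZE]);
            r->in_cons = cons;              /* publish consumption */
            return handled;
    }

    int main(void)
    {
            struct toy_ring r = { 0 };

            r.ring[0] = 42;
            r.in_prod = 1;
            printf("handled %u\n", drain(&r));
            return 0;
    }
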
diff -r 482aa2c39049 -r 31d71546a644 drivers/xen/netfront/accel.c
--- a/drivers/xen/netfront/accel.c      Thu Feb 07 11:09:20 2008 -0700
+++ b/drivers/xen/netfront/accel.c      Thu Feb 14 09:35:01 2008 -0700
@@ -446,6 +446,9 @@ int netfront_accelerator_loaded(int vers
 {
        struct netfront_accelerator *accelerator;
 
+       if (is_initial_xendomain())
+               return -EINVAL;
+
        if (version != NETFRONT_ACCEL_VERSION) {
                if (version > NETFRONT_ACCEL_VERSION) {
                        /* Caller has higher version number, leave it
@@ -693,32 +696,11 @@ int netfront_accelerator_suspend_cancel(
 int netfront_accelerator_suspend_cancel(struct netfront_info *np,
                                        struct xenbus_device *dev)
 {
-       struct netfront_accel_vif_state *accel_vif_state = NULL;
- 
-       mutex_lock(&accelerator_mutex);
-
-       /* Check that we've got a device that was accelerated */
-       if (np->accelerator == NULL)
-               goto out;
-
-       /* Find the vif_state from the accelerator's list */
-       list_for_each_entry(accel_vif_state, &np->accelerator->vif_states,
-                           link) {
-               if (accel_vif_state->dev == dev) {
-                       BUG_ON(accel_vif_state != &np->accel_vif_state);
- 
-                       /*
-                        * Kick things off again to restore
-                        * acceleration as it was before suspend 
-                        */
-                       accelerator_probe_new_vif(np, dev, np->accelerator);
- 
-                       break;
-               }
-       }
-       
- out:
-       mutex_unlock(&accelerator_mutex);
+       /* 
+        * Setting the watch will cause it to fire and probe the
+        * accelerator, so no need to call accelerator_probe_new_vif()
+        * directly here
+        */
        netfront_accelerator_add_watch(np);
        return 0;
 }
diff -r 482aa2c39049 -r 31d71546a644 fs/bio.c
--- a/fs/bio.c  Thu Feb 07 11:09:20 2008 -0700
+++ b/fs/bio.c  Thu Feb 14 09:35:01 2008 -0700
@@ -112,7 +112,8 @@ void bio_free(struct bio *bio, struct bi
 
        BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
 
-       mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
+       if (bio->bi_io_vec)
+               mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
        mempool_free(bio, bio_set->bio_pool);
 }
 
diff -r 482aa2c39049 -r 31d71546a644 fs/splice.c
--- a/fs/splice.c       Thu Feb 07 11:09:20 2008 -0700
+++ b/fs/splice.c       Thu Feb 14 09:35:01 2008 -0700
@@ -1141,6 +1141,9 @@ static int get_iovec_page_array(const st
                if (unlikely(!base))
                        break;
 
+               if (unlikely(!access_ok(VERIFY_READ, base, len)))
+                       break;
+
                /*
                 * Get this base offset and number of pages, then map
                 * in the user pages.
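
The splice fix rejects an iovec whose base/length fail access_ok() before any pages are mapped: validate untrusted ranges up front, before committing resources. A userspace analog that checks a (base, len) pair against a window without overflowing the pointer arithmetic (hypothetical helper, not the kernel's access_ok()):

    #include <stdio.h>
    #include <stddef.h>

    /* True if [base, base+len) lies inside [lo, hi). Comparing len
     * against the remaining space avoids base+len overflow. */
    static int range_ok(const char *base, size_t len,
                        const char *lo, const char *hi)
    {
            if (base < lo || base > hi)
                    return 0;
            return len <= (size_t)(hi - base);
    }

    int main(void)
    {
            char buf[64];

            printf("%d\n", range_ok(buf, 64, buf, buf + 64));       /* 1 */
            printf("%d\n", range_ok(buf + 32, 64, buf, buf + 64));  /* 0 */
            return 0;
    }
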
diff -r 482aa2c39049 -r 31d71546a644 include/asm-i386/mach-xen/asm/hypervisor.h
--- a/include/asm-i386/mach-xen/asm/hypervisor.h        Thu Feb 07 11:09:20 2008 -0700
+++ b/include/asm-i386/mach-xen/asm/hypervisor.h        Thu Feb 14 09:35:01 2008 -0700
@@ -130,7 +130,7 @@ u64 jiffies_to_st(unsigned long jiffies)
 u64 jiffies_to_st(unsigned long jiffies);
 
 #ifdef CONFIG_XEN_SCRUB_PAGES
-#define scrub_pages(_p,_n) memset((void *)(_p), 0, (_n) << PAGE_SHIFT)
+void scrub_pages(void *, unsigned int);
 #else
 #define scrub_pages(_p,_n) ((void)0)
 #endif
diff -r 482aa2c39049 -r 31d71546a644 include/linux/bio.h
--- a/include/linux/bio.h       Thu Feb 07 11:09:20 2008 -0700
+++ b/include/linux/bio.h       Thu Feb 14 09:35:01 2008 -0700
@@ -172,12 +172,27 @@ struct bio {
 #define bio_offset(bio)                bio_iovec((bio))->bv_offset
 #define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_cur_sectors(bio)   (bio_iovec(bio)->bv_len >> 9)
-#define bio_data(bio)          (page_address(bio_page((bio))) + bio_offset((bio)))
 #define bio_barrier(bio)       ((bio)->bi_rw & (1 << BIO_RW_BARRIER))
 #define bio_sync(bio)          ((bio)->bi_rw & (1 << BIO_RW_SYNC))
 #define bio_failfast(bio)      ((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)      ((bio)->bi_rw & (1 << BIO_RW_AHEAD))
+#define bio_empty_barrier(bio) (bio_barrier(bio) && !(bio)->bi_size)
+
+static inline unsigned int bio_cur_sectors(struct bio *bio)
+{
+       if (bio->bi_vcnt)
+               return bio_iovec(bio)->bv_len >> 9;
+
+       return 0;
+}
+
+static inline void *bio_data(struct bio *bio)
+{
+       if (bio->bi_vcnt)
+               return page_address(bio_page(bio)) + bio_offset(bio);
+
+       return NULL;
+}
 
 /*
  * will die
diff -r 482aa2c39049 -r 31d71546a644 include/linux/blkdev.h
--- a/include/linux/blkdev.h    Thu Feb 07 11:09:20 2008 -0700
+++ b/include/linux/blkdev.h    Thu Feb 14 09:35:01 2008 -0700
@@ -505,6 +505,8 @@ enum {
 #define blk_sorted_rq(rq)      ((rq)->flags & REQ_SORTED)
 #define blk_barrier_rq(rq)     ((rq)->flags & REQ_HARDBARRIER)
 #define blk_fua_rq(rq)         ((rq)->flags & REQ_FUA)
+
+#define blk_empty_barrier(rq)   (blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 
 #define list_entry_rq(ptr)     list_entry((ptr), struct request, queuelist)
 
diff -r 482aa2c39049 -r 31d71546a644 mm/highmem.c
--- a/mm/highmem.c      Thu Feb 07 11:09:20 2008 -0700
+++ b/mm/highmem.c      Thu Feb 14 09:35:01 2008 -0700
@@ -466,6 +466,12 @@ void blk_queue_bounce(request_queue_t *q
 void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 {
        mempool_t *pool;
+
+       /*
+        * Data-less bio, nothing to bounce
+        */
+       if (bio_empty_barrier(*bio_orig))
+               return;
 
        /*
         * for non-isa bounce case, just check if the bounce pfn is equal
