
[Xen-changelog] Fix range_straddles_page_boundary() check to exclude regions allocated with xen_create_contiguous_region()

# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID 1a94949348ff52863b9fbef4eaf102c7e46a345d
# Parent  aa8abf00726055b6a92ba96d1d2809ce86939502
Fix the range_straddles_page_boundary() check to exclude regions that
were allocated with xen_create_contiguous_region(). Such regions are
contiguous in machine memory and therefore do not require swiotlb
bounce buffers.

Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>

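On Xen, pages that are adjacent in the guest's pseudo-physical address
space are in general scattered in machine memory, so a DMA buffer that
crosses a page boundary normally has to be bounced through the swiotlb.
A toy userspace illustration of that premise, with an entirely
fabricated phys-to-machine table:

    #include <stdio.h>

    int main(void)
    {
            /* toy P2M table: pseudo-physical frame -> machine frame */
            unsigned long p2m[4] = { 0x1a0, 0x07c, 0x233, 0x051 };

            /* frames 0 and 1 are adjacent pseudo-physically, but... */
            printf("machine-adjacent? %s\n",
                   (p2m[1] == p2m[0] + 1) ? "yes" : "no"); /* prints "no" */
            return 0;
    }

Regions built by xen_create_contiguous_region() are the exception: they
are machine-contiguous by construction, so the straddle check can safely
be skipped for them. The contiguous_bitmap introduced below records
exactly those page ranges.
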
diff -r aa8abf007260 -r 1a94949348ff linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Thu Aug 18 10:22:08 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/hypervisor.c        Thu Aug 18 10:41:55 2005
@@ -59,124 +59,124 @@
 #ifndef CONFIG_XEN_SHADOW_MODE
 void xen_l1_entry_update(pte_t *ptr, pte_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pte_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = pte_val_ma(val);
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pmd_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = pmd_val_ma(val);
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_X86_PAE
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = pud_val_ma(val);
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = pud_val_ma(val);
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif
 
 #ifdef CONFIG_X86_64
 void xen_l3_entry_update(pud_t *ptr, pud_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = val.pud;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = val.pud;
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
 {
-    mmu_update_t u;
-    u.ptr = virt_to_machine(ptr);
-    u.val = val.pgd;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = virt_to_machine(ptr);
+       u.val = val.pgd;
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
 void xen_machphys_update(unsigned long mfn, unsigned long pfn)
 {
-    mmu_update_t u;
-    u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
-    u.val = pfn;
-    BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
+       mmu_update_t u;
+       u.ptr = (mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+       u.val = pfn;
+       BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pt_switch(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_NEW_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_NEW_BASEPTR;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_new_user_pt(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_NEW_USER_BASEPTR;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_NEW_USER_BASEPTR;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_tlb_flush(void)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_INVLPG_LOCAL;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_INVLPG_LOCAL;
+       op.linear_addr = ptr & PAGE_MASK;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_SMP
 
 void xen_tlb_flush_all(void)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_TLB_FLUSH_ALL;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_TLB_FLUSH_ALL;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_tlb_flush_mask(cpumask_t *mask)
 {
-    struct mmuext_op op;
-    if ( cpus_empty(*mask) )
-        return;
-    op.cmd = MMUEXT_TLB_FLUSH_MULTI;
-    op.vcpumask = mask->bits;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       if ( cpus_empty(*mask) )
+               return;
+       op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+       op.vcpumask = mask->bits;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg_all(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_INVLPG_ALL;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_INVLPG_ALL;
+       op.linear_addr = ptr & PAGE_MASK;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
 {
-    struct mmuext_op op;
-    if ( cpus_empty(*mask) )
-        return;
-    op.cmd = MMUEXT_INVLPG_MULTI;
-    op.vcpumask = mask->bits;
-    op.linear_addr = ptr & PAGE_MASK;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       if ( cpus_empty(*mask) )
+               return;
+       op.cmd = MMUEXT_INVLPG_MULTI;
+       op.vcpumask = mask->bits;
+       op.linear_addr = ptr & PAGE_MASK;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #endif /* CONFIG_SMP */
@@ -184,221 +184,281 @@
 #ifndef CONFIG_XEN_SHADOW_MODE
 void xen_pgd_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
+       struct mmuext_op op;
 #ifdef CONFIG_X86_64
-    op.cmd = MMUEXT_PIN_L4_TABLE;
+       op.cmd = MMUEXT_PIN_L4_TABLE;
 #elif defined(CONFIG_X86_PAE)
-    op.cmd = MMUEXT_PIN_L3_TABLE;
+       op.cmd = MMUEXT_PIN_L3_TABLE;
 #else
-    op.cmd = MMUEXT_PIN_L2_TABLE;
-#endif
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       op.cmd = MMUEXT_PIN_L2_TABLE;
+#endif
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pgd_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pte_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L1_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_PIN_L1_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pte_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 #ifdef CONFIG_X86_64
 void xen_pud_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L3_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_PIN_L3_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pud_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pmd_pin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_PIN_L2_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_PIN_L2_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 
 void xen_pmd_unpin(unsigned long ptr)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_UNPIN_TABLE;
-    op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_UNPIN_TABLE;
+       op.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
 }
 #endif /* CONFIG_X86_64 */
 #endif /* CONFIG_XEN_SHADOW_MODE */
 
 void xen_set_ldt(unsigned long ptr, unsigned long len)
 {
-    struct mmuext_op op;
-    op.cmd = MMUEXT_SET_LDT;
-    op.linear_addr = ptr;
-    op.nr_ents = len;
-    BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+       struct mmuext_op op;
+       op.cmd = MMUEXT_SET_LDT;
+       op.linear_addr = ptr;
+       op.nr_ents = len;
+       BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
+}
+
+/*
+ * Bitmap is indexed by page number. If bit is set, the page is part of a
+ * xen_create_contiguous_region() area of memory.
+ */
+unsigned long *contiguous_bitmap;
+
+static void contiguous_bitmap_set(
+       unsigned long first_page, unsigned long nr_pages)
+{
+       unsigned long start_off, end_off, curr_idx, end_idx;
+
+       curr_idx  = first_page / BITS_PER_LONG;
+       start_off = first_page & (BITS_PER_LONG-1);
+       end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
+       end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
+
+       if (curr_idx == end_idx) {
+               contiguous_bitmap[curr_idx] |=
+                       ((1UL<<end_off)-1) & -(1UL<<start_off);
+       } else {
+               contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
+               while ( ++curr_idx < end_idx )
+                       contiguous_bitmap[curr_idx] = ~0UL;
+               contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
+       }
+}
+
+static void contiguous_bitmap_clear(
+       unsigned long first_page, unsigned long nr_pages)
+{
+       unsigned long start_off, end_off, curr_idx, end_idx;
+
+       curr_idx  = first_page / BITS_PER_LONG;
+       start_off = first_page & (BITS_PER_LONG-1);
+       end_idx   = (first_page + nr_pages) / BITS_PER_LONG;
+       end_off   = (first_page + nr_pages) & (BITS_PER_LONG-1);
+
+       if (curr_idx == end_idx) {
+               contiguous_bitmap[curr_idx] &=
+                       -(1UL<<end_off) | ((1UL<<start_off)-1);
+       } else {
+               contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
+               while ( ++curr_idx != end_idx )
+                       contiguous_bitmap[curr_idx] = 0;
+               contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
+       }
 }
 
 /* Ensure multi-page extents are contiguous in machine memory. */
 void xen_create_contiguous_region(unsigned long vstart, unsigned int order)
 {
-    pgd_t         *pgd; 
-    pud_t         *pud; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long  mfn, i, flags;
-
-    scrub_pages(vstart, 1 << order);
-
-    balloon_lock(flags);
-
-    /* 1. Zap current PTEs, giving away the underlying pages. */
-    for (i = 0; i < (1<<order); i++) {
-        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        mfn = pte_mfn(*pte);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
-    }
-
-    /* 2. Get a new contiguous memory extent. */
-    BUG_ON(HYPERVISOR_dom_mem_op(
-              MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
-
-    /* 3. Map the new extent in place of old pages. */
-    for (i = 0; i < (1<<order); i++) {
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE),
-            __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
-        xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
-    }
-
-    flush_tlb_all();
-
-    balloon_unlock(flags);
+       pgd_t         *pgd; 
+       pud_t         *pud; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long  mfn, i, flags;
+
+       scrub_pages(vstart, 1 << order);
+
+       balloon_lock(flags);
+
+       /* 1. Zap current PTEs, giving away the underlying pages. */
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+               mfn = pte_mfn(*pte);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+       }
+
+       /* 2. Get a new contiguous memory extent. */
+       BUG_ON(HYPERVISOR_dom_mem_op(
+               MEMOP_increase_reservation, &mfn, 1, order | (32<<8)) != 1);
+
+       /* 3. Map the new extent in place of old pages. */
+       for (i = 0; i < (1<<order); i++) {
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE),
+                       __pte_ma(((mfn+i)<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
+               xen_machphys_update(mfn+i, (__pa(vstart)>>PAGE_SHIFT)+i);
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn+i;
+       }
+
+       flush_tlb_all();
+
+       contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
+
+       balloon_unlock(flags);
 }
 
 void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
 {
-    pgd_t         *pgd; 
-    pud_t         *pud; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long  mfn, i, flags;
-
-    scrub_pages(vstart, 1 << order);
-
-    balloon_lock(flags);
-
-    /* 1. Zap current PTEs, giving away the underlying pages. */
-    for (i = 0; i < (1<<order); i++) {
-        pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
-        mfn = pte_mfn(*pte);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
-    }
-
-    /* 2. Map new pages in place of old pages. */
-    for (i = 0; i < (1<<order); i++) {
-        BUG_ON(HYPERVISOR_dom_mem_op(
-            MEMOP_increase_reservation, &mfn, 1, 0) != 1);
-        BUG_ON(HYPERVISOR_update_va_mapping(
-            vstart + (i*PAGE_SIZE),
-            __pte_ma((mfn<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
-        xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
-    }
-
-    flush_tlb_all();
-
-    balloon_unlock(flags);
+       pgd_t         *pgd; 
+       pud_t         *pud; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long  mfn, i, flags;
+
+       scrub_pages(vstart, 1 << order);
+
+       balloon_lock(flags);
+
+       contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
+
+       /* 1. Zap current PTEs, giving away the underlying pages. */
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(vstart + (i*PAGE_SIZE));
+               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE)));
+               mfn = pte_mfn(*pte);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE), __pte_ma(0), 0));
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_decrease_reservation, &mfn, 1, 0) != 1);
+       }
+
+       /* 2. Map new pages in place of old pages. */
+       for (i = 0; i < (1<<order); i++) {
+               BUG_ON(HYPERVISOR_dom_mem_op(
+                       MEMOP_increase_reservation, &mfn, 1, 0) != 1);
+               BUG_ON(HYPERVISOR_update_va_mapping(
+                       vstart + (i*PAGE_SIZE),
+                       __pte_ma((mfn<<PAGE_SHIFT)|__PAGE_KERNEL), 0));
+               xen_machphys_update(mfn, (__pa(vstart)>>PAGE_SHIFT)+i);
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] = mfn;
+       }
+
+       flush_tlb_all();
+
+       balloon_unlock(flags);
 }
 
 
 unsigned long allocate_empty_lowmem_region(unsigned long pages)
 {
-    pgd_t         *pgd;
-    pud_t         *pud; 
-    pmd_t         *pmd;
-    pte_t         *pte;
-    unsigned long *pfn_array;
-    unsigned long  vstart;
-    unsigned long  i;
-    unsigned int   order = get_order(pages*PAGE_SIZE);
-
-    vstart = __get_free_pages(GFP_KERNEL, order);
-    if ( vstart == 0 )
-        return 0UL;
-
-    scrub_pages(vstart, 1 << order);
-
-    pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
-    if ( pfn_array == NULL )
-        BUG();
-
-    for ( i = 0; i < (1<<order); i++ )
-    {
-        pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
-        pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
-        pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
-        pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
-        pfn_array[i] = pte_mfn(*pte);
+       pgd_t         *pgd;
+       pud_t         *pud; 
+       pmd_t         *pmd;
+       pte_t         *pte;
+       unsigned long *pfn_array;
+       unsigned long  vstart;
+       unsigned long  i;
+       unsigned int   order = get_order(pages*PAGE_SIZE);
+
+       vstart = __get_free_pages(GFP_KERNEL, order);
+       if (vstart == 0)
+               return 0UL;
+
+       scrub_pages(vstart, 1 << order);
+
+       pfn_array = vmalloc((1<<order) * sizeof(*pfn_array));
+       BUG_ON(pfn_array == NULL);
+
+       for (i = 0; i < (1<<order); i++) {
+               pgd = pgd_offset_k(   (vstart + (i*PAGE_SIZE)));
+               pud = pud_offset(pgd, (vstart + (i*PAGE_SIZE)));
+               pmd = pmd_offset(pud, (vstart + (i*PAGE_SIZE)));
+               pte = pte_offset_kernel(pmd, (vstart + (i*PAGE_SIZE))); 
+               pfn_array[i] = pte_mfn(*pte);
 #ifdef CONFIG_X86_64
-        xen_l1_entry_update(pte, __pte(0));
+               xen_l1_entry_update(pte, __pte(0));
 #else
-        BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
-                                           __pte_ma(0), 0));
-#endif
-        phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
-            INVALID_P2M_ENTRY;
-    }
-
-    flush_tlb_all();
-
-    balloon_put_pages(pfn_array, 1 << order);
-
-    vfree(pfn_array);
-
-    return vstart;
+               BUG_ON(HYPERVISOR_update_va_mapping(vstart + (i*PAGE_SIZE), 
+                                                   __pte_ma(0), 0));
+#endif
+               phys_to_machine_mapping[(__pa(vstart)>>PAGE_SHIFT)+i] =
+                       INVALID_P2M_ENTRY;
+       }
+
+       flush_tlb_all();
+
+       balloon_put_pages(pfn_array, 1 << order);
+
+       vfree(pfn_array);
+
+       return vstart;
 }
 
 EXPORT_SYMBOL(allocate_empty_lowmem_region);
+
+/*
+ * Local variables:
+ *  c-file-style: "linux"
+ *  indent-tabs-mode: t
+ *  c-indent-level: 8
+ *  c-basic-offset: 8
+ *  tab-width: 8
+ * End:
+ */
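
In contiguous_bitmap_set(), the single-word case builds its mask from two
halves: (1UL<<end_off)-1 keeps the bits below end_off, and -(1UL<<start_off)
(two's complement) keeps the bits at or above start_off. A standalone
sketch of that arithmetic, with illustrative values and BITS_PER_LONG
assumed to be 64:

    #include <stdio.h>

    int main(void)
    {
            unsigned long start_off = 3, end_off = 7; /* pages 3..6 of one word */
            unsigned long mask = ((1UL << end_off) - 1) & -(1UL << start_off);

            printf("mask = %#lx\n", mask); /* prints mask = 0x78 (bits 3..6) */
            return 0;
    }

When a range ends exactly on a word boundary, end_off is zero and the
multi-word branch runs instead; its final OR of (1UL<<end_off)-1 then
contributes nothing, which is harmless.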
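
xen_create_contiguous_region() above proceeds in three steps: hand the
pfns behind the range back to Xen (MEMOP_decrease_reservation, one page
at a time), request a single machine-contiguous extent of 2^order pages
(the (32<<8) in the final argument encodes, alongside the order, a
32-bit address-width restriction so the extent is usable for 32-bit
DMA), and remap the extent in place while fixing up the phys-to-machine
and machine-to-phys tables. A hypothetical caller, sketched for a
driver that wants an order-2 (4-page) DMA buffer:

    unsigned long vstart = __get_free_pages(GFP_KERNEL, 2); /* 4 pages */
    if (vstart) {
            xen_create_contiguous_region(vstart, 2);
            /* ... the buffer can now be DMAed across page boundaries
             *     without swiotlb bouncing ... */
            xen_destroy_contiguous_region(vstart, 2);
            free_pages(vstart, 2);
    }
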
diff -r aa8abf007260 -r 1a94949348ff linux-2.6-xen-sparse/arch/xen/i386/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c      Thu Aug 18 10:22:08 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/init.c      Thu Aug 18 10:41:55 2005
@@ -41,6 +41,8 @@
 #include <asm/sections.h>
 #include <asm-xen/hypervisor.h>
 
+extern unsigned long *contiguous_bitmap;
+
 #if defined(CONFIG_SWIOTLB)
 extern void swiotlb_init(void);
 int swiotlb;
@@ -636,6 +638,11 @@
        int tmp;
        int bad_ppro;
        unsigned long pfn;
+
+       contiguous_bitmap = alloc_bootmem_low_pages(
+               (max_low_pfn + 2*BITS_PER_LONG) >> 3);
+       BUG_ON(!contiguous_bitmap);
+       memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
 
 #if defined(CONFIG_SWIOTLB)
        swiotlb_init(); 
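
The bitmap needs one bit per low-memory pfn; the >> 3 converts bits to
bytes, and the extra 2*BITS_PER_LONG bits appear to leave a spare word
of slack so the set/clear helpers can touch the word just past the last
pfn. A worked instance, assuming i386 (BITS_PER_LONG == 32), 4KB pages
and 896MB of lowmem:

    /* max_low_pfn = 896MB / 4KB = 229376
     * (229376 + 2*32) >> 3 = 28680 bytes
     *                      = 28672 bytes of bits + 8 bytes (two spare longs)
     */
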
diff -r aa8abf007260 -r 1a94949348ff linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c
--- a/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Thu Aug 18 10:22:08 2005
+++ b/linux-2.6-xen-sparse/arch/xen/x86_64/mm/init.c    Thu Aug 18 10:41:55 2005
@@ -39,6 +39,12 @@
 #include <asm/mmu_context.h>
 #include <asm/proto.h>
 #include <asm/smp.h>
+
+extern unsigned long *contiguous_bitmap;
+
+#if defined(CONFIG_SWIOTLB)
+extern void swiotlb_init(void);
+#endif
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -794,8 +800,12 @@
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
+       contiguous_bitmap = alloc_bootmem_low_pages(
+               (end_pfn + 2*BITS_PER_LONG) >> 3);
+       BUG_ON(!contiguous_bitmap);
+       memset(contiguous_bitmap, 0, (end_pfn + 2*BITS_PER_LONG) >> 3);
+
 #if defined(CONFIG_SWIOTLB)
-       extern void swiotlb_init(void);
        swiotlb_init(); 
 #endif
 
diff -r aa8abf007260 -r 1a94949348ff linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h       Thu Aug 18 10:22:08 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/dma-mapping.h       Thu Aug 18 10:41:55 2005
@@ -26,7 +26,9 @@
 static inline int
 range_straddles_page_boundary(void *p, size_t size)
 {
-       return ((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE);
+       extern unsigned long *contiguous_bitmap;
+       return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+               !test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)

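The amended check only consults the bitmap when the buffer actually
crosses a page boundary, and it tests the bit of the buffer's first pfn
(implicitly assuming that when the first page belongs to a contiguous
region, the rest of the buffer does too). A self-contained userspace
analog; PAGE_SIZE, the bitmap contents and the pfn arguments are all
fabricated for illustration, whereas in the kernel the pfn comes from
__pa(p) >> PAGE_SHIFT and test_bit() is the standard bitops helper:

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE     4096UL
    #define PAGE_MASK     (~(PAGE_SIZE - 1))
    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    static unsigned long contiguous_bitmap[4]; /* pfns 0 .. 4*BITS_PER_LONG-1 */

    static int test_bit_demo(unsigned long nr, const unsigned long *addr)
    {
            return (int)((addr[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1);
    }

    static int straddles(unsigned long p, size_t size, unsigned long pfn)
    {
            return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
                    !test_bit_demo(pfn, contiguous_bitmap));
    }

    int main(void)
    {
            contiguous_bitmap[0] |= 0xFUL << 8;          /* pfns 8..11 contiguous */

            printf("%d\n", straddles(0x8800, 4096, 8));  /* crosses, contiguous -> 0 */
            printf("%d\n", straddles(0x8800, 4096, 40)); /* crosses, ordinary   -> 1 */
            printf("%d\n", straddles(0x8000, 4096, 40)); /* fits in one page    -> 0 */
            return 0;
    }
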
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog