
[Xen-changelog] merge?



# HG changeset patch
# User cl349@xxxxxxxxxxxxxxxxxxxx
# Node ID 2704a88c329598a92bc349869df461363e509b8f
# Parent  22c30df92b11f37e1f266976d0667d7c8cb02783
# Parent  4cdf880c94633aadff4d4d45f7a88d7e97f0e9dd
merge?
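
The common thread through this merge is PAE/x86_64 address-width safety: a
machine address built by shifting a frame number inside a 32-bit unsigned
long silently truncates once the frame sits above 4GB, so the hunks below
switch interfaces over to passing frame numbers (MFNs) and only widen to 64
bits at the point where a full machine address is actually formed. A minimal
standalone sketch of the failure mode -- not code from this tree, and the
frame number is purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
    uint32_t mfn  = 0x123456;                     /* a frame above the 4GB boundary */
    uint32_t bad  = mfn << PAGE_SHIFT;            /* truncated in 32 bits           */
    uint64_t good = (uint64_t)mfn << PAGE_SHIFT;  /* the widening the patches add   */

    printf("32-bit shift: %#x\n", bad);
    printf("64-bit shift: %#llx\n", (unsigned long long)good);
    return 0;
}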

diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64
--- a/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64        Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/arch/xen/configs/xen_defconfig_x86_64        Fri Sep  9 08:56:38 2005
@@ -2202,7 +2202,7 @@
 CONFIG_PROC_FS=y
 CONFIG_PROC_KCORE=y
 CONFIG_SYSFS=y
-# CONFIG_DEVFS_FS is not set
+CONFIG_DEVFS_FS=y
 CONFIG_DEVPTS_FS_XATTR=y
 CONFIG_DEVPTS_FS_SECURITY=y
 CONFIG_TMPFS=y
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c
--- a/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/arch/xen/i386/mm/ioremap.c   Fri Sep  9 08:56:38 2005
@@ -22,13 +22,14 @@
 #define ISA_START_ADDRESS      0x0
 #define ISA_END_ADDRESS                0x100000
 
+#if 0 /* not PAE safe */
 /* These hacky macros avoid phys->machine translations. */
 #define __direct_pte(x) ((pte_t) { (x) } )
 #define __direct_mk_pte(page_nr,pgprot) \
   __direct_pte(((page_nr) << PAGE_SHIFT) | pgprot_val(pgprot))
 #define direct_mk_pte_phys(physpage, pgprot) \
   __direct_mk_pte((physpage) >> PAGE_SHIFT, pgprot)
-
+#endif
 
 static int direct_remap_area_pte_fn(pte_t *pte, 
                                    struct page *pte_page,
@@ -37,16 +38,16 @@
 {
        mmu_update_t **v = (mmu_update_t **)data;
 
-       (*v)->ptr = ((maddr_t)pfn_to_mfn(page_to_pfn(pte_page)) <<
+       (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pte_page)) <<
                     PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
        (*v)++;
 
        return 0;
 }
 
-int direct_remap_area_pages(struct mm_struct *mm,
+int direct_remap_pfn_range(struct mm_struct *mm,
                            unsigned long address, 
-                           unsigned long machine_addr,
+                           unsigned long mfn,
                            unsigned long size, 
                            pgprot_t prot,
                            domid_t  domid)
@@ -77,9 +78,9 @@
                 * Fill in the machine address: PTE ptr is done later by
                 * __direct_remap_area_pages(). 
                 */
-               v->val = pte_val_ma(pfn_pte_ma(machine_addr >> PAGE_SHIFT, prot));
-
-               machine_addr += PAGE_SIZE;
+               v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
+
+               mfn++;
                address += PAGE_SIZE; 
                v++;
        }
@@ -97,8 +98,10 @@
        return 0;
 }
 
-EXPORT_SYMBOL(direct_remap_area_pages);
-
+EXPORT_SYMBOL(direct_remap_pfn_range);
+
+
+/* FIXME: This is horribly broken on PAE */ 
 static int lookup_pte_fn(
        pte_t *pte, struct page *pte_page, unsigned long addr, void *data)
 {
@@ -218,7 +221,7 @@
 #ifdef __x86_64__
        flags |= _PAGE_USER;
 #endif
-       if (direct_remap_area_pages(&init_mm, (unsigned long) addr, phys_addr,
+       if (direct_remap_pfn_range(&init_mm, (unsigned long) addr, phys_addr>>PAGE_SHIFT,
                                    size, __pgprot(flags), domid)) {
                vunmap((void __force *) addr);
                return NULL;
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/blkfront/blkfront.c      Fri Sep  9 08:56:38 2005
@@ -182,7 +182,7 @@
 static int blkif_queue_request(struct request *req)
 {
        struct blkfront_info *info = req->rq_disk->private_data;
-       unsigned long buffer_ma;
+       unsigned long buffer_mfn;
        blkif_request_t *ring_req;
        struct bio *bio;
        struct bio_vec *bvec;
@@ -221,7 +221,7 @@
                bio_for_each_segment (bvec, bio, idx) {
                        BUG_ON(ring_req->nr_segments
                               == BLKIF_MAX_SEGMENTS_PER_REQUEST);
-                       buffer_ma = page_to_phys(bvec->bv_page);
+                       buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
                        fsect = bvec->bv_offset >> 9;
                        lsect = fsect + (bvec->bv_len >> 9) - 1;
                        /* install a grant reference. */
@@ -231,11 +231,11 @@
                        gnttab_grant_foreign_access_ref(
                                ref,
                                info->backend_id,
-                               buffer_ma >> PAGE_SHIFT,
+                               buffer_mfn,
                                rq_data_dir(req) );
 
                        info->shadow[id].frame[ring_req->nr_segments] =
-                               buffer_ma >> PAGE_SHIFT;
+                               buffer_mfn;
 
                        ring_req->frame_and_sects[ring_req->nr_segments] =
                                blkif_fas_from_gref(ref, fsect, lsect);
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/drivers/xen/netback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/interface.c      Fri Sep  9 08:56:38 2005
@@ -154,12 +154,12 @@
     pgprot_t      prot = __pgprot(_KERNPG_TABLE);
     int           err;
 
-    err = direct_remap_area_pages(&init_mm, localaddr,
-                                 tx_ring_ref<<PAGE_SHIFT, PAGE_SIZE,
+    err = direct_remap_pfn_range(&init_mm, localaddr,
+                                 tx_ring_ref, PAGE_SIZE,
                                  prot, netif->domid); 
     
-    err |= direct_remap_area_pages(&init_mm, localaddr + PAGE_SIZE,
-                                 rx_ring_ref<<PAGE_SHIFT, PAGE_SIZE,
+    err |= direct_remap_pfn_range(&init_mm, localaddr + PAGE_SIZE,
+                                 rx_ring_ref, PAGE_SIZE,
                                  prot, netif->domid);
 
     if (err)
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/drivers/xen/netback/netback.c
--- a/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netback/netback.c        Fri Sep  9 08:56:38 2005
@@ -297,7 +297,7 @@
         mmuext->mfn = old_mfn;
         mmuext++;
 #endif
-        mmu->ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
+        mmu->ptr = ((unsigned long long)new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
         mmu->val = __pa(vdata) >> PAGE_SHIFT;  
         mmu++;
 
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c
--- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c      Fri Sep  9 08:56:38 2005
@@ -618,7 +618,7 @@
 
         /* Remap the page. */
 #ifdef CONFIG_XEN_NETDEV_GRANT
-        mmu->ptr = mfn << PAGE_SHIFT | MMU_MACHPHYS_UPDATE;
+        mmu->ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
 #else
         mmu->ptr  = (rx->addr & PAGE_MASK) | MMU_MACHPHYS_UPDATE;
 #endif
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c
--- a/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/privcmd/privcmd.c        Fri Sep  9 08:56:38 2005
@@ -116,9 +116,9 @@
                 if ( (msg[j].va + (msg[j].npages<<PAGE_SHIFT)) > vma->vm_end )
                     return -EINVAL;
 
-                if ( (rc = direct_remap_area_pages(vma->vm_mm, 
+                if ( (rc = direct_remap_pfn_range(vma->vm_mm, 
                                                    msg[j].va&PAGE_MASK, 
-                                                   msg[j].mfn<<PAGE_SHIFT, 
+                                                   msg[j].mfn, 
                                                    msg[j].npages<<PAGE_SHIFT, 
                                                    vma->vm_page_prot,
                                                    mmapcmd.dom)) < 0 )
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/drivers/xen/usbback/interface.c
--- a/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c      Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/drivers/xen/usbback/interface.c      Fri Sep  9 08:56:38 2005
@@ -161,8 +161,8 @@
     }
 
     prot = __pgprot(_KERNPG_TABLE);
-    error = direct_remap_area_pages(&init_mm, VMALLOC_VMADDR(vma->addr),
-                                    shmem_frame<<PAGE_SHIFT, PAGE_SIZE,
+    error = direct_remap_pfn_range(&init_mm, VMALLOC_VMADDR(vma->addr),
+                                    shmem_frame, PAGE_SIZE,
                                     prot, domid);
     if ( error != 0 )
     {
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h   Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-i386/pgtable.h   Fri Sep  9 08:56:38 2005
@@ -460,9 +460,9 @@
 #define kern_addr_valid(addr)  (1)
 #endif /* !CONFIG_DISCONTIGMEM */
 
-int direct_remap_area_pages(struct mm_struct *mm,
+int direct_remap_pfn_range(struct mm_struct *mm,
                             unsigned long address, 
-                            unsigned long machine_addr,
+                            unsigned long mfn,
                             unsigned long size, 
                             pgprot_t prot,
                             domid_t  domid);
@@ -474,10 +474,10 @@
                     unsigned long size);
 
 #define io_remap_page_range(vma,from,phys,size,prot) \
-direct_remap_area_pages(vma->vm_mm,from,phys,size,prot,DOMID_IO)
+direct_remap_pfn_range(vma->vm_mm,from,phys>>PAGE_SHIFT,size,prot,DOMID_IO)
 
 #define io_remap_pfn_range(vma,from,pfn,size,prot) \
-direct_remap_area_pages(vma->vm_mm,from,pfn<<PAGE_SHIFT,size,prot,DOMID_IO)
+direct_remap_pfn_range(vma->vm_mm,from,pfn,size,prot,DOMID_IO)
 
 #define MK_IOSPACE_PFN(space, pfn)     (pfn)
 #define GET_IOSPACE(pfn)               0
diff -r 22c30df92b11 -r 2704a88c3295 linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h
--- a/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Sep  9 08:56:14 2005
+++ b/linux-2.6-xen-sparse/include/asm-xen/asm-x86_64/pgtable.h Fri Sep  9 08:56:38 2005
@@ -526,28 +526,26 @@
 
 #define DOMID_LOCAL (0xFFFFU)
 
-int direct_remap_area_pages(struct mm_struct *mm,
+int direct_remap_pfn_range(struct mm_struct *mm,
                             unsigned long address,
-                            unsigned long machine_addr,
+                            unsigned long mfn,
                             unsigned long size,
                             pgprot_t prot,
                             domid_t  domid);
-int __direct_remap_area_pages(struct mm_struct *mm,
-                              unsigned long address,
-                              unsigned long size,
-                              mmu_update_t *v);
+
 int create_lookup_pte_addr(struct mm_struct *mm,
                            unsigned long address,
                            unsigned long *ptep);
+
 int touch_pte_range(struct mm_struct *mm,
                     unsigned long address,
                     unsigned long size);
 
 #define io_remap_page_range(vma, vaddr, paddr, size, prot)             \
-               direct_remap_area_pages((vma)->vm_mm,vaddr,paddr,size,prot,DOMID_IO)
+               direct_remap_pfn_range((vma)->vm_mm,vaddr,paddr>>PAGE_SHIFT,size,prot,DOMID_IO)
 
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)                \
-               direct_remap_area_pages((vma)->vm_mm,vaddr,(pfn)<<PAGE_SHIFT,size,prot,DOMID_IO)
+               direct_remap_pfn_range((vma)->vm_mm,vaddr,pfn,size,prot,DOMID_IO)
 
 #define MK_IOSPACE_PFN(space, pfn)     (pfn)
 #define GET_IOSPACE(pfn)               0
diff -r 22c30df92b11 -r 2704a88c3295 tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Fri Sep  9 08:56:14 2005
+++ b/tools/libxc/xc_domain.c   Fri Sep  9 08:56:38 2005
@@ -262,23 +262,66 @@
 
 int xc_domain_memory_increase_reservation(int xc_handle,
                                           u32 domid, 
-                                          unsigned int mem_kb)
+                                          unsigned long nr_extents,
+                                          unsigned int extent_order,
+                                          unsigned int address_bits,
+                                         unsigned long *extent_start)
 {
     int err;
-    unsigned int npages = mem_kb / (PAGE_SIZE/1024);
     struct xen_memory_reservation reservation = {
-        .nr_extents   = npages,
-        .extent_order = 0,
+        .extent_start = extent_start, /* may be NULL */
+        .nr_extents   = nr_extents,
+        .extent_order = extent_order,  
+        .address_bits = address_bits,
         .domid        = domid
     };
 
     err = xc_memory_op(xc_handle, XENMEM_increase_reservation, &reservation);
-    if (err == npages)
+    if (err == nr_extents)
         return 0;
 
     if (err > 0) {
+        fprintf(stderr,"Failed allocation for dom %d : %ld pages order %d addr_bits %d\n",
+                                 domid, nr_extents, extent_order, address_bits);
         errno = ENOMEM;
         err = -1;
     }
     return err;
 }
+
+int xc_domain_memory_decrease_reservation(int xc_handle,
+                                          u32 domid, 
+                                          unsigned long nr_extents,
+                                          unsigned int extent_order,
+                                         unsigned long *extent_start)
+{
+    int err;
+    struct xen_memory_reservation reservation = {
+        .extent_start = extent_start, 
+        .nr_extents   = nr_extents,
+        .extent_order = extent_order,  
+        .address_bits = 0,
+        .domid        = domid
+    };
+
+    if (extent_start == NULL)
+    {
+        fprintf(stderr,"decrease_reservation extent_start is NULL!\n");
+        errno = EINVAL;
+        err = -1;
+       goto out;
+    }
+
+    err = xc_memory_op(xc_handle, XENMEM_decrease_reservation, &reservation);
+    if (err == nr_extents)
+        return 0;
+
+    if (err > 0) {
+        fprintf(stderr,"Failed de-allocation for dom %d : %ld pages order %d\n",
+                                 domid, nr_extents, extent_order);
+        errno = EBUSY;
+        err = -1;
+    }
+out:
+    return err;
+}
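
For callers, increase_reservation now works in extents rather than kilobytes
and can constrain the allocation's address width. A hedged usage sketch
against the signatures declared in this changeset (see the xenctrl.h hunk
further down); the wrapper name and page count are illustrative and error
handling is trimmed:

#include <stdio.h>
#include <xenctrl.h>   /* libxc headers from this tree assumed */

int give_domain_low_pages(int xc_handle, u32 domid, unsigned long nr_pages)
{
    /* nr_pages order-0 extents, constrained to 32 address bits (below 4GB);
     * passing NULL means we do not need the allocated frame numbers back. */
    if (xc_domain_memory_increase_reservation(xc_handle, domid,
                                              nr_pages, 0, 32, NULL) != 0) {
        fprintf(stderr, "reservation increase failed\n");
        return -1;
    }
    return 0;
}
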
diff -r 22c30df92b11 -r 2704a88c3295 tools/libxc/xc_linux_build.c
--- a/tools/libxc/xc_linux_build.c      Fri Sep  9 08:56:14 2005
+++ b/tools/libxc/xc_linux_build.c      Fri Sep  9 08:56:38 2005
@@ -57,7 +57,7 @@
 }
 
 #define alloc_pt(ltab, vltab) \
-        ltab = page_array[ppt_alloc++] << PAGE_SHIFT; \
+        ltab = (unsigned long long)(page_array[ppt_alloc++]) << PAGE_SHIFT; \
         if (vltab != NULL) { \
             munmap(vltab, PAGE_SIZE); \
         } \
@@ -128,18 +128,37 @@
     l1_pgentry_64_t *vl1tab=NULL, *vl1e=NULL;
     l2_pgentry_64_t *vl2tab=NULL, *vl2e=NULL;
     l3_pgentry_64_t *vl3tab=NULL, *vl3e=NULL;
-    unsigned long l1tab = 0;
-    unsigned long l2tab = 0;
-    unsigned long l3tab = 0;
+    unsigned long long l1tab = 0;
+    unsigned long long l2tab = 0;
+    unsigned long long l3tab = 0;
     unsigned long ppt_alloc;
     unsigned long count;
 
     /* First allocate page for page dir. */
     ppt_alloc = (vpt_start - dsi_v_start) >> PAGE_SHIFT;
+
+    if ( page_array[ppt_alloc] > 0xfffff )
+    {
+       unsigned long nmfn;
+       nmfn = xc_make_page_below_4G( xc_handle, dom, page_array[ppt_alloc] );
+       if ( nmfn == 0 )
+       {
+           fprintf(stderr, "Couldn't get a page below 4GB :-(\n");
+           goto error_out;
+       }
+       page_array[ppt_alloc] = nmfn;
+    }
+
     alloc_pt(l3tab, vl3tab);
     vl3e = &vl3tab[l3_table_offset_pae(dsi_v_start)];
     ctxt->ctrlreg[3] = l3tab;
-    
+
+    if(l3tab>0xfffff000ULL)
+    {
+        fprintf(stderr,"L3TAB = %llx above 4GB!\n",l3tab);
+        goto error_out;
+    }
+ 
     for ( count = 0; count < ((v_end-dsi_v_start)>>PAGE_SHIFT); count++)
     {
         if ( !((unsigned long)vl1e & (PAGE_SIZE-1)) )
@@ -525,12 +544,14 @@
     physmap = physmap_e = xc_map_foreign_range(
         xc_handle, dom, PAGE_SIZE, PROT_READ|PROT_WRITE,
         page_array[physmap_pfn++]);
+
     for ( count = 0; count < nr_pages; count++ )
     {
         if ( xc_add_mmu_update(xc_handle, mmu,
-                              (page_array[count] << PAGE_SHIFT) | 
+                              ((unsigned long long)page_array[count] << PAGE_SHIFT) | 
                               MMU_MACHPHYS_UPDATE, count) )
         {
+            fprintf(stderr,"m2p update failure p=%lx m=%lx\n",count,page_array[count] ); 
             munmap(physmap, PAGE_SIZE);
             goto error_out;
         }
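
On PAE the L3 page-directory-pointer table must live below 4GB (CR3 holds a
32-bit physical address), which is what the new "> 0xfffff" and
"> 0xfffff000ULL" checks above enforce. The arithmetic behind those constants,
as a standalone sketch rather than code from the tree:

#include <stdio.h>

int main(void)
{
    /* With 4KB pages, frame 0xfffff is the last whose address fits in 32 bits. */
    unsigned long long last_ok   = 0xfffffULL  << 12;   /* 0xfffff000         */
    unsigned long long first_bad = 0x100000ULL << 12;   /* 0x100000000 == 4GB */

    printf("frame 0xfffff  -> %#llx\n", last_ok);
    printf("frame 0x100000 -> %#llx\n", first_bad);
    return 0;
}
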
diff -r 22c30df92b11 -r 2704a88c3295 tools/libxc/xc_linux_restore.c
--- a/tools/libxc/xc_linux_restore.c    Fri Sep  9 08:56:14 2005
+++ b/tools/libxc/xc_linux_restore.c    Fri Sep  9 08:56:38 2005
@@ -149,9 +149,9 @@
     }
 
     err = xc_domain_memory_increase_reservation(xc_handle, dom,
-                                                nr_pfns * PAGE_SIZE / 1024);
+                                                nr_pfns, 0, 0, NULL);
     if (err != 0) {
-        ERR("Failed to increate reservation by %lx\n", 
+        ERR("Failed to increase reservation by %lx\n", 
             nr_pfns * PAGE_SIZE / 1024); 
         errno = ENOMEM;
         goto out;
diff -r 22c30df92b11 -r 2704a88c3295 tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Fri Sep  9 08:56:14 2005
+++ b/tools/libxc/xc_private.c  Fri Sep  9 08:56:38 2005
@@ -116,7 +116,7 @@
 
     if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
     {
-       fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
+       fprintf(stderr, "Dom_mmuext operation failed (rc=%ld errno=%d)-- need to"
                     " rebuild the user-space tool set?\n",ret,errno);
     }
 
@@ -172,7 +172,7 @@
 }
 
 int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
-                     unsigned long ptr, unsigned long val)
+                     unsigned long long ptr, unsigned long long val)
 {
     mmu->updates[mmu->idx].ptr = ptr;
     mmu->updates[mmu->idx].val = val;
@@ -229,7 +229,7 @@
 
     if ( (ret = do_xen_hypercall(xc_handle, &hypercall)) < 0 )
     {
-       fprintf(stderr, "Dom_mem operation failed (rc=%ld errno=%d)-- need to"
+       fprintf(stderr, "hypercall failed (rc=%ld errno=%d)-- need to"
                 " rebuild the user-space tool set?\n",ret,errno);
     }
 
@@ -427,3 +427,21 @@
 {
     return do_xen_version(xc_handle, cmd, arg);
 }
+
+unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
+                                   unsigned long mfn)
+{
+    unsigned long new_mfn;
+    if ( xc_domain_memory_decrease_reservation( 
+       xc_handle, domid, 1, 0, &mfn ) != 1 )
+    {
+       fprintf(stderr,"xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
+       return 0;
+    }
+    if ( xc_domain_memory_increase_reservation( xc_handle, domid, 1, 0, 32, &new_mfn ) != 1 )
+    {
+       fprintf(stderr,"xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
+       return 0;
+    }
+    return new_mfn;
+}
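
xc_make_page_below_4G() trades a frame in by returning it with the
decrease_reservation call and then asks for a replacement with
address_bits=32; xc_linux_build.c uses it above to relocate PAE page-table
frames. A hedged sketch of the calling pattern (the wrapper function is
hypothetical, libxc headers from this tree are assumed, error handling is
trimmed):

#include <stdio.h>
#include <xenctrl.h>

void relocate_if_needed(int xc_handle, u32 dom, unsigned long *mfn)
{
    if (*mfn > 0xfffff) {   /* frame's machine address would exceed 4GB */
        unsigned long nmfn = xc_make_page_below_4G(xc_handle, dom, *mfn);
        if (nmfn == 0)
            fprintf(stderr, "could not relocate frame %#lx\n", *mfn);
        else
            *mfn = nmfn;
    }
}
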
diff -r 22c30df92b11 -r 2704a88c3295 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri Sep  9 08:56:14 2005
+++ b/tools/libxc/xenctrl.h     Fri Sep  9 08:56:38 2005
@@ -387,7 +387,19 @@
 
 int xc_domain_memory_increase_reservation(int xc_handle,
                                           u32 domid, 
-                                          unsigned int mem_kb);
+                                          unsigned long nr_extents,
+                                          unsigned int extent_order,
+                                          unsigned int address_bits,
+                                         unsigned long *extent_start);
+
+int xc_domain_memory_decrease_reservation(int xc_handle,
+                                          u32 domid, 
+                                          unsigned long nr_extents,
+                                          unsigned int extent_order,
+                                         unsigned long *extent_start);
+
+unsigned long xc_make_page_below_4G(int xc_handle, u32 domid, 
+                                   unsigned long mfn);
 
 typedef dom0_perfc_desc_t xc_perfc_desc_t;
 /* IMPORTANT: The caller is responsible for mlock()'ing the @desc array. */
@@ -521,7 +533,7 @@
 typedef struct xc_mmu xc_mmu_t;
 xc_mmu_t *xc_init_mmu_updates(int xc_handle, domid_t dom);
 int xc_add_mmu_update(int xc_handle, xc_mmu_t *mmu, 
-                   unsigned long ptr, unsigned long val);
+                   unsigned long long ptr, unsigned long long val);
 int xc_finish_mmu_updates(int xc_handle, xc_mmu_t *mmu);
 
 #endif
diff -r 22c30df92b11 -r 2704a88c3295 tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Fri Sep  9 08:56:14 2005
+++ b/tools/python/xen/lowlevel/xc/xc.c Fri Sep  9 08:56:38 2005
@@ -841,14 +841,21 @@
 
     u32 dom;
     unsigned long mem_kb;
-
-    static char *kwd_list[] = { "dom", "mem_kb", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "ii", kwd_list, 
-                                      &dom, &mem_kb) )
-        return NULL;
-
-    if ( xc_domain_memory_increase_reservation(xc->xc_handle, dom, mem_kb) )
+    unsigned int extent_order = 0 , address_bits = 0;
+    unsigned long nr_extents;
+
+    static char *kwd_list[] = { "dom", "mem_kb", "extent_order", "address_bits", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "il|ii", kwd_list, 
+                                      &dom, &mem_kb, &extent_order, &address_bits) )
+        return NULL;
+
+    /* round down to nearest power of 2. Assume callers using extent_order>0
+       know what they are doing */
+    nr_extents = (mem_kb / (XC_PAGE_SIZE/1024)) >> extent_order;
+    if ( xc_domain_memory_increase_reservation(xc->xc_handle, dom, 
+                                              nr_extents, extent_order, address_bits, NULL) )
         return PyErr_SetFromErrno(xc_error);
     
     Py_INCREF(zero);
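
The Python binding keeps its mem_kb argument and converts it to extents
before calling down. A standalone check of that arithmetic (XC_PAGE_SIZE is
assumed to be 4096 here; the request size is illustrative):

#include <stdio.h>

#define XC_PAGE_SIZE 4096

int main(void)
{
    unsigned long mem_kb = 131072;        /* a 128MB request                    */
    unsigned int extent_order = 0;        /* order-0 extents, i.e. single pages */
    unsigned long nr_extents = (mem_kb / (XC_PAGE_SIZE / 1024)) >> extent_order;

    printf("%lu extents\n", nr_extents);  /* 32768 pages for 128MB */
    return 0;
}
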
diff -r 22c30df92b11 -r 2704a88c3295 tools/python/xen/xend/image.py
--- a/tools/python/xen/xend/image.py    Fri Sep  9 08:56:14 2005
+++ b/tools/python/xen/xend/image.py    Fri Sep  9 08:56:38 2005
@@ -159,7 +159,12 @@
         xc.domain_setmaxmem(dom, mem_kb)
 
         try:
-            xc.domain_memory_increase_reservation(dom, mem_kb)
+            # Give the domain some memory below 4GB
+            lmem_kb = 0
+            if lmem_kb > 0:
+                xc.domain_memory_increase_reservation(dom, min(lmem_kb,mem_kb), 0, 32)
+            if mem_kb > lmem_kb:
+                xc.domain_memory_increase_reservation(dom, mem_kb-lmem_kb, 0, 0)
         except:
             xc.domain_destroy(dom)
             raise
diff -r 22c30df92b11 -r 2704a88c3295 xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Fri Sep  9 08:56:14 2005
+++ b/xen/arch/x86/domain.c     Fri Sep  9 08:56:38 2005
@@ -381,11 +381,13 @@
 out:
     free_vmcs(vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
@@ -972,11 +974,13 @@
     BUG_ON(v->arch.arch_vmx.vmcs == NULL);
     free_vmcs(v->arch.arch_vmx.vmcs);
     if(v->arch.arch_vmx.io_bitmap_a != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_a, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_a, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_a = 0;
     }
     if(v->arch.arch_vmx.io_bitmap_b != 0) {
-        free_xenheap_pages(v->arch.arch_vmx.io_bitmap_b, get_order(0x1000));
+        free_xenheap_pages(
+            v->arch.arch_vmx.io_bitmap_b, get_order_from_bytes(0x1000));
         v->arch.arch_vmx.io_bitmap_b = 0;
     }
     v->arch.arch_vmx.vmcs = 0;
diff -r 22c30df92b11 -r 2704a88c3295 xen/arch/x86/domain_build.c
--- a/xen/arch/x86/domain_build.c       Fri Sep  9 08:56:14 2005
+++ b/xen/arch/x86/domain_build.c       Fri Sep  9 08:56:38 2005
@@ -75,15 +75,12 @@
     struct pfn_info *page;
     unsigned int order;
     /*
-     * Allocate up to 2MB at a time:
-     *  1. This prevents overflow of get_order() when allocating more than
-     *     4GB to domain 0 on a PAE machine.
-     *  2. It prevents allocating very large chunks from DMA pools before
-     *     the >4GB pool is fully depleted.
+     * Allocate up to 2MB at a time: It prevents allocating very large chunks
+     * from DMA pools before the >4GB pool is fully depleted.
      */
     if ( max_pages > (2UL << (20 - PAGE_SHIFT)) )
         max_pages = 2UL << (20 - PAGE_SHIFT);
-    order = get_order(max_pages << PAGE_SHIFT);
+    order = get_order_from_pages(max_pages);
     if ( (max_pages & (max_pages-1)) != 0 )
         order--;
     while ( (page = alloc_domheap_pages(d, order, 0)) == NULL )
@@ -252,7 +249,7 @@
 #endif
     }
 
-    order = get_order(v_end - dsi.v_start);
+    order = get_order_from_bytes(v_end - dsi.v_start);
     if ( (1UL << order) > nr_pages )
         panic("Domain 0 allocation is too small for kernel image.\n");
 
diff -r 22c30df92b11 -r 2704a88c3295 xen/arch/x86/vmx_vmcs.c
--- a/xen/arch/x86/vmx_vmcs.c   Fri Sep  9 08:56:14 2005
+++ b/xen/arch/x86/vmx_vmcs.c   Fri Sep  9 08:56:38 2005
@@ -44,7 +44,7 @@
 
     rdmsr(MSR_IA32_VMX_BASIC_MSR, vmx_msr_low, vmx_msr_high);
     vmcs_size = vmx_msr_high & 0x1fff;
-    vmcs = alloc_xenheap_pages(get_order(vmcs_size)); 
+    vmcs = alloc_xenheap_pages(get_order_from_bytes(vmcs_size)); 
     memset((char *)vmcs, 0, vmcs_size); /* don't remove this */
 
     vmcs->vmcs_revision_id = vmx_msr_low;
@@ -55,7 +55,7 @@
 {
     int order;
 
-    order = get_order(vmcs_size);
+    order = get_order_from_bytes(vmcs_size);
     free_xenheap_pages(vmcs, order);
 }
 
@@ -76,8 +76,8 @@
     error |= __vmwrite(VM_ENTRY_CONTROLS, MONITOR_VM_ENTRY_CONTROLS);
 
     /* need to use 0x1000 instead of PAGE_SIZE */
-    io_bitmap_a = (void*) alloc_xenheap_pages(get_order(0x1000)); 
-    io_bitmap_b = (void*) alloc_xenheap_pages(get_order(0x1000)); 
+    io_bitmap_a = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
+    io_bitmap_b = (void*) alloc_xenheap_pages(get_order_from_bytes(0x1000)); 
     memset(io_bitmap_a, 0xff, 0x1000);
     /* don't bother debug port access */
     clear_bit(PC_DEBUG_PORT, io_bitmap_a);
diff -r 22c30df92b11 -r 2704a88c3295 xen/arch/x86/x86_32/mm.c
--- a/xen/arch/x86/x86_32/mm.c  Fri Sep  9 08:56:14 2005
+++ b/xen/arch/x86/x86_32/mm.c  Fri Sep  9 08:56:38 2005
@@ -118,7 +118,8 @@
     }
 
     /* Set up mapping cache for domain pages. */
-    mapcache_order = get_order(MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
+    mapcache_order = get_order_from_bytes(
+        MAPCACHE_MBYTES << (20 - PAGETABLE_ORDER));
     mapcache = alloc_xenheap_pages(mapcache_order);
     memset(mapcache, 0, PAGE_SIZE << mapcache_order);
     for ( i = 0; i < (MAPCACHE_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
diff -r 22c30df92b11 -r 2704a88c3295 xen/common/grant_table.c
--- a/xen/common/grant_table.c  Fri Sep  9 08:56:14 2005
+++ b/xen/common/grant_table.c  Fri Sep  9 08:56:38 2005
@@ -399,7 +399,7 @@
     {
         int              i;
         grant_mapping_t *new_mt;
-        grant_table_t   *lgt      = ld->grant_table;
+        grant_table_t   *lgt = ld->grant_table;
 
         if ( (lgt->maptrack_limit << 1) > MAPTRACK_MAX_ENTRIES )
         {
@@ -437,9 +437,8 @@
             ref, dom, dev_hst_ro_flags);
 #endif
 
-    if ( 0 <= ( rc = __gnttab_activate_grant_ref( ld, led, rd, ref,
-                                                  dev_hst_ro_flags,
-                                                  addr, &frame)))
+    if ( (rc = __gnttab_activate_grant_ref(ld, led, rd, ref, dev_hst_ro_flags,
+                                           addr, &frame)) >= 0 )
     {
         /*
          * Only make the maptrack live _after_ writing the pte, in case we 
@@ -807,7 +806,8 @@
     int i;
     int result = GNTST_okay;
 
-    for (i = 0; i < count; i++) {
+    for ( i = 0; i < count; i++ )
+    {
         gnttab_donate_t *gop = &uop[i];
 #if GRANT_DEBUG
         printk("gnttab_donate: i=%d mfn=%lx domid=%d gref=%08x\n",
@@ -815,19 +815,24 @@
 #endif
         page = &frame_table[gop->mfn];
         
-        if (unlikely(IS_XEN_HEAP_FRAME(page))) { 
+        if ( unlikely(IS_XEN_HEAP_FRAME(page)))
+        { 
             printk("gnttab_donate: xen heap frame mfn=%lx\n", 
                    (unsigned long) gop->mfn);
             gop->status = GNTST_bad_virt_addr;
             continue;
         }
-        if (unlikely(!pfn_valid(page_to_pfn(page)))) {
+        
+        if ( unlikely(!pfn_valid(page_to_pfn(page))) )
+        {
             printk("gnttab_donate: invalid pfn for mfn=%lx\n", 
                    (unsigned long) gop->mfn);
             gop->status = GNTST_bad_virt_addr;
             continue;
         }
-        if (unlikely((e = find_domain_by_id(gop->domid)) == NULL)) {
+
+        if ( unlikely((e = find_domain_by_id(gop->domid)) == NULL) )
+        {
             printk("gnttab_donate: can't find domain %d\n", gop->domid);
             gop->status = GNTST_bad_domain;
             continue;
@@ -881,48 +886,23 @@
          * headroom.  Also, a domain mustn't have PGC_allocated
          * pages when it is dying.
          */
-#ifdef GRANT_DEBUG
-        if (unlikely(e->tot_pages >= e->max_pages)) {
-            printk("gnttab_dontate: no headroom tot_pages=%d max_pages=%d\n",
-                   e->tot_pages, e->max_pages);
+        if ( unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
+             unlikely(e->tot_pages >= e->max_pages) ||
+             unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle)) )
+        {
+            DPRINTK("gnttab_donate: Transferee has no reservation headroom "
+                    "(%d,%d) or provided a bad grant ref (%08x) or "
+                    "is dying (%lx)\n",
+                    e->tot_pages, e->max_pages, gop->handle, e->domain_flags);
             spin_unlock(&e->page_alloc_lock);
             put_domain(e);
             gop->status = result = GNTST_general_error;
             break;
         }
-        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags))) {
-            printk("gnttab_donate: target domain is dying\n");
-            spin_unlock(&e->page_alloc_lock);
-            put_domain(e);
-            gop->status = result = GNTST_general_error;
-            break;
-        }
-        if (unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
-            printk("gnttab_donate: gnttab_prepare_for_transfer fails.\n");
-            spin_unlock(&e->page_alloc_lock);
-            put_domain(e);
-            gop->status = result = GNTST_general_error;
-            break;
-        }
-#else
-        ASSERT(e->tot_pages <= e->max_pages);
-        if (unlikely(test_bit(DOMFLAGS_DYING, &e->domain_flags)) ||
-            unlikely(e->tot_pages == e->max_pages) ||
-            unlikely(!gnttab_prepare_for_transfer(e, d, gop->handle))) {
-            printk("gnttab_donate: Transferee has no reservation headroom (%d,"
-                   "%d) or provided a bad grant ref (%08x) or is dying (%p)\n",
-                   e->tot_pages, e->max_pages, gop->handle, e->d_flags);
-            spin_unlock(&e->page_alloc_lock);
-            put_domain(e);
-            /* XXX SMH: better error return here would be useful */
-            gop->status = result = GNTST_general_error;
-            break;
-        }
-#endif
+
         /* Okay, add the page to 'e'. */
-        if (unlikely(e->tot_pages++ == 0)) {
+        if ( unlikely(e->tot_pages++ == 0) )
             get_knownalive_domain(e);
-        }
         list_add_tail(&page->list, &e->page_list);
         page_set_owner(page, e);
         
@@ -938,6 +918,7 @@
         
         gop->status = GNTST_okay;
     }
+
     return result;
 }
 
@@ -957,38 +938,38 @@
     
     rc = -EFAULT;
     switch ( cmd )
-        {
-        case GNTTABOP_map_grant_ref:
-            if ( unlikely(!array_access_ok(
-                              uop, count, sizeof(gnttab_map_grant_ref_t))) )
-                goto out;
-            rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
-            break;
-        case GNTTABOP_unmap_grant_ref:
-            if ( unlikely(!array_access_ok(
-                              uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
-                goto out;
-            rc = gnttab_unmap_grant_ref((gnttab_unmap_grant_ref_t *)uop, 
-                                        count);
-            break;
-        case GNTTABOP_setup_table:
-            rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
-            break;
+    {
+    case GNTTABOP_map_grant_ref:
+        if ( unlikely(!array_access_ok(
+            uop, count, sizeof(gnttab_map_grant_ref_t))) )
+            goto out;
+        rc = gnttab_map_grant_ref((gnttab_map_grant_ref_t *)uop, count);
+        break;
+    case GNTTABOP_unmap_grant_ref:
+        if ( unlikely(!array_access_ok(
+            uop, count, sizeof(gnttab_unmap_grant_ref_t))) )
+            goto out;
+        rc = gnttab_unmap_grant_ref(
+            (gnttab_unmap_grant_ref_t *)uop, count);
+        break;
+    case GNTTABOP_setup_table:
+        rc = gnttab_setup_table((gnttab_setup_table_t *)uop, count);
+        break;
 #if GRANT_DEBUG
-        case GNTTABOP_dump_table:
-            rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
-            break;
+    case GNTTABOP_dump_table:
+        rc = gnttab_dump_table((gnttab_dump_table_t *)uop);
+        break;
 #endif
-        case GNTTABOP_donate:
-            if (unlikely(!array_access_ok(uop, count, 
-                                          sizeof(gnttab_donate_t))))
-                goto out;
-            rc = gnttab_donate(uop, count);
-            break;
-        default:
-            rc = -ENOSYS;
-            break;
-        }
+    case GNTTABOP_donate:
+        if (unlikely(!array_access_ok(
+            uop, count, sizeof(gnttab_donate_t))))
+            goto out;
+        rc = gnttab_donate(uop, count);
+        break;
+    default:
+        rc = -ENOSYS;
+        break;
+    }
     
   out:
     UNLOCK_BIGLOCK(d);
@@ -1021,17 +1002,17 @@
     lgt = ld->grant_table;
     
 #if GRANT_DEBUG_VERBOSE
-    if ( ld->domain_id != 0 ) {
-            DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
-                    rd->domain_id, ld->domain_id, frame, readonly);
-      }
+    if ( ld->domain_id != 0 )
+        DPRINTK("Foreign unref rd(%d) ld(%d) frm(%lx) flgs(%x).\n",
+                rd->domain_id, ld->domain_id, frame, readonly);
 #endif
     
     /* Fast exit if we're not mapping anything using grant tables */
     if ( lgt->map_count == 0 )
         return 0;
     
-    if ( get_domain(rd) == 0 ) {
+    if ( get_domain(rd) == 0 )
+    {
         DPRINTK("gnttab_check_unmap: couldn't get_domain rd(%d)\n",
                 rd->domain_id);
         return 0;
@@ -1268,8 +1249,11 @@
     for ( i = 0; i < NR_GRANT_FRAMES; i++ )
     {
         SHARE_PFN_WITH_DOMAIN(
-            virt_to_page((char *)(t->shared)+(i*PAGE_SIZE)), d);
-        set_pfn_from_mfn((virt_to_phys(t->shared) >> PAGE_SHIFT) + i, INVALID_M2P_ENTRY);
+            virt_to_page((char *)t->shared + (i * PAGE_SIZE)),
+            d);
+        set_pfn_from_mfn(
+            (virt_to_phys(t->shared) >> PAGE_SHIFT) + i,
+            INVALID_M2P_ENTRY);
     }
 
     /* Okay, install the structure. */
@@ -1306,57 +1290,53 @@
     {
         map = &gt->maptrack[handle];
 
-        if ( map->ref_and_flags & GNTMAP_device_map )
-        {
-            dom = map->domid;
-            ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
-
-            DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
-                    handle, ref,
-                    map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
-
-            if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
-                 unlikely(ld == rd) )
+        if ( !(map->ref_and_flags & GNTMAP_device_map) )
+            continue;
+
+        dom = map->domid;
+        ref = map->ref_and_flags >> MAPTRACK_REF_SHIFT;
+
+        DPRINTK("Grant release (%hu) ref:(%hu) flags:(%x) dom:(%hu)\n",
+                handle, ref, map->ref_and_flags & MAPTRACK_GNTMAP_MASK, dom);
+
+        if ( unlikely((rd = find_domain_by_id(dom)) == NULL) ||
+             unlikely(ld == rd) )
+        {
+            if ( rd != NULL )
+                put_domain(rd);
+            printk(KERN_WARNING "Grant release: No dom%d\n", dom);
+            continue;
+        }
+
+        act = &rd->grant_table->active[ref];
+        sha = &rd->grant_table->shared[ref];
+
+        spin_lock(&rd->grant_table->lock);
+
+        if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
+        {
+            frame = act->frame;
+
+            if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
+                 ( (act->pin & GNTPIN_devw_mask) >  0 ) )
             {
-                if ( rd != NULL )
-                    put_domain(rd);
-
-                printk(KERN_WARNING "Grant release: No dom%d\n", dom);
-                continue;
+                clear_bit(_GTF_writing, &sha->flags);
+                put_page_type(&frame_table[frame]);
             }
 
-            act = &rd->grant_table->active[ref];
-            sha = &rd->grant_table->shared[ref];
-
-            spin_lock(&rd->grant_table->lock);
-
-            if ( act->pin & (GNTPIN_devw_mask | GNTPIN_devr_mask) )
+            map->ref_and_flags &= ~GNTMAP_device_map;
+            act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
+            if ( act->pin == 0 )
             {
-                frame = act->frame;
-
-                if ( ( (act->pin & GNTPIN_hstw_mask) == 0 ) &&
-                     ( (act->pin & GNTPIN_devw_mask) >  0 ) )
-                {
-                    clear_bit(_GTF_writing, &sha->flags);
-                    put_page_type(&frame_table[frame]);
-                }
-
-                act->pin &= ~(GNTPIN_devw_mask | GNTPIN_devr_mask);
-
-                if ( act->pin == 0 )
-                {
-                    clear_bit(_GTF_reading, &sha->flags);
-                    map->ref_and_flags = 0;
-                    put_page(&frame_table[frame]);
-                }
-                else
-                    map->ref_and_flags &= ~GNTMAP_device_map;
+                clear_bit(_GTF_reading, &sha->flags);
+                map->ref_and_flags = 0;
+                put_page(&frame_table[frame]);
             }
-
-            spin_unlock(&rd->grant_table->lock);
-
-            put_domain(rd);
-        }
+        }
+
+        spin_unlock(&rd->grant_table->lock);
+
+        put_domain(rd);
     }
 }
 
diff -r 22c30df92b11 -r 2704a88c3295 xen/common/memory.c
--- a/xen/common/memory.c       Fri Sep  9 08:56:14 2005
+++ b/xen/common/memory.c       Fri Sep  9 08:56:38 2005
@@ -31,8 +31,8 @@
     struct pfn_info *page;
     unsigned long    i;
 
-    if ( (extent_list != NULL)
-         && !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
+    if ( (extent_list != NULL) &&
+         !array_access_ok(extent_list, nr_extents, sizeof(*extent_list)) )
         return 0;
 
     if ( (extent_order != 0) && !IS_CAPABLE_PHYSDEV(current->domain) )
@@ -52,13 +52,14 @@
         if ( unlikely((page = alloc_domheap_pages(
             d, extent_order, flags)) == NULL) )
         {
-            DPRINTK("Could not allocate a frame\n");
+            DPRINTK("Could not allocate order=%d extent: id=%d flags=%x\n",
+                    extent_order, d->domain_id, flags);
             return i;
         }
 
         /* Inform the domain of the new page's machine address. */ 
-        if ( (extent_list != NULL)
-             && (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
+        if ( (extent_list != NULL) &&
+             (__put_user(page_to_pfn(page), &extent_list[i]) != 0) )
             return i;
     }
 
@@ -152,8 +153,9 @@
             reservation.extent_start += start_extent;
         reservation.nr_extents -= start_extent;
 
-        if ( unlikely(reservation.address_bits != 0)
-             && (reservation.address_bits > (get_order(max_page)+PAGE_SHIFT)) )
+        if ( (reservation.address_bits != 0) &&
+             (reservation.address_bits <
+              (get_order_from_pages(max_page) + PAGE_SHIFT)) )
         {
             if ( reservation.address_bits < 31 )
                 return -ENOMEM;
diff -r 22c30df92b11 -r 2704a88c3295 xen/common/page_alloc.c
--- a/xen/common/page_alloc.c   Fri Sep  9 08:56:14 2005
+++ b/xen/common/page_alloc.c   Fri Sep  9 08:56:38 2005
@@ -216,7 +216,7 @@
 #define NR_ZONES    3
 
 
-#define MAX_DMADOM_PFN 0x7FFFF /* 31 addressable bits */
+#define MAX_DMADOM_PFN 0x7FFFFUL /* 31 addressable bits */
 #define pfn_dom_zone_type(_pfn)                                 \
     (((_pfn) <= MAX_DMADOM_PFN) ? MEMZONE_DMADOM : MEMZONE_DOM)
 
@@ -485,43 +485,40 @@
 
 void init_domheap_pages(physaddr_t ps, physaddr_t pe)
 {
+    unsigned long s_tot, e_tot, s_dma, e_dma, s_nrm, e_nrm;
+
     ASSERT(!in_irq());
 
-    ps = round_pgup(ps) >> PAGE_SHIFT;
-    pe = round_pgdown(pe) >> PAGE_SHIFT;
-    if ( pe <= ps )
-        return;
-
-    if ( (ps < MAX_DMADOM_PFN) && (pe > MAX_DMADOM_PFN) )
-    {
-        init_heap_pages(
-            MEMZONE_DMADOM, pfn_to_page(ps), MAX_DMADOM_PFN - ps);
-        init_heap_pages(
-            MEMZONE_DOM, pfn_to_page(MAX_DMADOM_PFN), pe - MAX_DMADOM_PFN);
-    }
-    else
-    {
-        init_heap_pages(pfn_dom_zone_type(ps), pfn_to_page(ps), pe - ps);
-    }
+    s_tot = round_pgup(ps) >> PAGE_SHIFT;
+    e_tot = round_pgdown(pe) >> PAGE_SHIFT;
+
+    s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
+    e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
+    if ( s_dma < e_dma )
+        init_heap_pages(MEMZONE_DMADOM, pfn_to_page(s_dma), e_dma - s_dma);
+
+    s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
+    e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);
+    if ( s_nrm < e_nrm )
+        init_heap_pages(MEMZONE_DOM, pfn_to_page(s_nrm), e_nrm - s_nrm);
 }
 
 
 struct pfn_info *alloc_domheap_pages(
     struct domain *d, unsigned int order, unsigned int flags)
 {
-    struct pfn_info *pg;
+    struct pfn_info *pg = NULL;
     cpumask_t mask;
     int i;
 
     ASSERT(!in_irq());
 
-    pg = NULL;
-    if (! (flags & ALLOC_DOM_DMA))
+    if ( !(flags & ALLOC_DOM_DMA) )
         pg = alloc_heap_pages(MEMZONE_DOM, order);
-    if (pg == NULL) {
-        if ( unlikely((pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL) )
+
+    if ( pg == NULL )
+        if ( (pg = alloc_heap_pages(MEMZONE_DMADOM, order)) == NULL )
             return NULL;
-    }
 
     mask = pg->u.free.cpumask;
     tlbflush_filter(mask, pg->tlbflush_timestamp);
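
init_domheap_pages() now clamps the range against MAX_DMADOM_PFN+1 instead of
special-casing a straddling range, which also covers ranges falling entirely
inside one zone. The same clamping arithmetic as a standalone sketch (the
input frame range is made up):

#include <stdio.h>

#define MAX_DMADOM_PFN 0x7FFFFUL
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
    unsigned long s_tot = 0x60000, e_tot = 0xA0000;  /* straddles the DMA limit */

    unsigned long s_dma = min(s_tot, MAX_DMADOM_PFN + 1);
    unsigned long e_dma = min(e_tot, MAX_DMADOM_PFN + 1);
    unsigned long s_nrm = max(s_tot, MAX_DMADOM_PFN + 1);
    unsigned long e_nrm = max(e_tot, MAX_DMADOM_PFN + 1);

    if (s_dma < e_dma)
        printf("DMA zone:    %#lx..%#lx\n", s_dma, e_dma);
    if (s_nrm < e_nrm)
        printf("normal zone: %#lx..%#lx\n", s_nrm, e_nrm);
    return 0;
}
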
diff -r 22c30df92b11 -r 2704a88c3295 xen/common/trace.c
--- a/xen/common/trace.c        Fri Sep  9 08:56:14 2005
+++ b/xen/common/trace.c        Fri Sep  9 08:56:38 2005
@@ -66,7 +66,7 @@
     }
 
     nr_pages = num_online_cpus() * opt_tbuf_size;
-    order    = get_order(nr_pages * PAGE_SIZE);
+    order    = get_order_from_pages(nr_pages);
     
     if ( (rawbuf = alloc_xenheap_pages(order)) == NULL )
     {
diff -r 22c30df92b11 -r 2704a88c3295 xen/common/xmalloc.c
--- a/xen/common/xmalloc.c      Fri Sep  9 08:56:14 2005
+++ b/xen/common/xmalloc.c      Fri Sep  9 08:56:38 2005
@@ -86,7 +86,7 @@
 static void *xmalloc_whole_pages(size_t size)
 {
     struct xmalloc_hdr *hdr;
-    unsigned int pageorder = get_order(size);
+    unsigned int pageorder = get_order_from_bytes(size);
 
     hdr = alloc_xenheap_pages(pageorder);
     if ( hdr == NULL )
@@ -159,7 +159,7 @@
     /* Big allocs free directly. */
     if ( hdr->size >= PAGE_SIZE )
     {
-        free_xenheap_pages(hdr, get_order(hdr->size));
+        free_xenheap_pages(hdr, get_order_from_bytes(hdr->size));
         return;
     }
 
diff -r 22c30df92b11 -r 2704a88c3295 xen/drivers/char/console.c
--- a/xen/drivers/char/console.c        Fri Sep  9 08:56:14 2005
+++ b/xen/drivers/char/console.c        Fri Sep  9 08:56:38 2005
@@ -627,7 +627,7 @@
     if ( bytes == 0 )
         return 0;
 
-    order = get_order(bytes);
+    order = get_order_from_bytes(bytes);
     debugtrace_buf = alloc_xenheap_pages(order);
     ASSERT(debugtrace_buf != NULL);
 
diff -r 22c30df92b11 -r 2704a88c3295 xen/drivers/char/serial.c
--- a/xen/drivers/char/serial.c Fri Sep  9 08:56:14 2005
+++ b/xen/drivers/char/serial.c Fri Sep  9 08:56:38 2005
@@ -366,8 +366,9 @@
 void serial_async_transmit(struct serial_port *port)
 {
     BUG_ON(!port->driver->tx_empty);
-    if ( !port->txbuf )
-        port->txbuf = alloc_xenheap_pages(get_order(SERIAL_TXBUFSZ));
+    if ( port->txbuf == NULL )
+        port->txbuf = alloc_xenheap_pages(
+            get_order_from_bytes(SERIAL_TXBUFSZ));
 }
 
 /*
diff -r 22c30df92b11 -r 2704a88c3295 xen/include/asm-x86/page.h
--- a/xen/include/asm-x86/page.h        Fri Sep  9 08:56:14 2005
+++ b/xen/include/asm-x86/page.h        Fri Sep  9 08:56:38 2005
@@ -280,12 +280,21 @@
 
 #ifndef __ASSEMBLY__
 
-static __inline__ int get_order(unsigned long size)
+static inline int get_order_from_bytes(physaddr_t size)
 {
     int order;
     size = (size-1) >> PAGE_SHIFT;
     for ( order = 0; size; order++ )
         size >>= 1;
+    return order;
+}
+
+static inline int get_order_from_pages(unsigned long nr_pages)
+{
+    int order;
+    nr_pages--;
+    for ( order = 0; nr_pages; order++ )
+        nr_pages >>= 1;
     return order;
 }
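
Splitting get_order() into get_order_from_bytes() and get_order_from_pages()
lets callers size allocations in whichever unit they already have and removes
the overflow-prone "pages * PAGE_SIZE" step (as in the xen/common/trace.c hunk
above). A standalone check of the two helpers, with the bodies copied from the
hunk (PAGE_SHIFT fixed at 12 and physaddr_t widened to unsigned long long for
the sketch):

#include <stdio.h>

#define PAGE_SHIFT 12

static int get_order_from_bytes(unsigned long long size)
{
    int order;
    size = (size - 1) >> PAGE_SHIFT;
    for (order = 0; size; order++)
        size >>= 1;
    return order;
}

static int get_order_from_pages(unsigned long nr_pages)
{
    int order;
    nr_pages--;
    for (order = 0; nr_pages; order++)
        nr_pages >>= 1;
    return order;
}

int main(void)
{
    printf("%d\n", get_order_from_bytes(0x1000));  /* 0: exactly one page   */
    printf("%d\n", get_order_from_bytes(0x1001));  /* 1: just over one page */
    printf("%d\n", get_order_from_pages(1));       /* 0                     */
    printf("%d\n", get_order_from_pages(3));       /* 2: rounds up          */
    return 0;
}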
 
