
[Xen-changelog] [IA64] Add memory operations for xen/ia64



# HG changeset patch
# User awilliam@xxxxxxxxxxx
# Node ID 827c65c06a66dd067c64ebb56b17b4379d83cbf9
# Parent  0a6f5527ca4bd560fc5c750e7ade3a0040144e40
[IA64] Add memory operations for xen/ia64

This patch removes an ugly hack around the memory operations and thus
makes the increase/decrease_reservation ops available for xen/ia64.

As a result:
  * We now conform to the common sequence, whereby all domain pages
    are allocated together by increase_reservation before the
    image builder runs
  * The physmap table is now built at the point of arch_set_info_guest
  * DOM0_GETMEMLIST can now only query, without allocating
  
With this patch, several long-standing hacks caused by the mismatched
memory interface are cleaned up. It is also a base step toward
introducing the balloon feature into xen/ia64.
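
For illustration (not part of the changeset): the toolstack flow this
enables looks roughly like the sketch below. The two libxc calls match
the interfaces visible in this patch, but the wrapper itself and its
exact signatures are assumptions, not code from any tree:

    /* Sketch: allocate every domain page up front, then query the list. */
    static int build_domain_memory(int xc_handle, uint32_t domid,
                                   unsigned long nr_pages,
                                   unsigned long *page_array)
    {
        /* 1. All pages now come from the common increase_reservation op. */
        if (xc_domain_memory_increase_reservation(xc_handle, domid,
                                                  nr_pages, 0, 0, NULL))
            return -1;

        /* 2. DOM0_GETMEMLIST merely queries the already-allocated pages. */
        if (xc_ia64_get_pfn_list(xc_handle, domid, page_array,
                                 0 /* start_page */, nr_pages) != nr_pages)
            return -1;

        return 0;    /* page_array now holds the domain's mfns */
    }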
    
Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>

diff -r 0a6f5527ca4b -r 827c65c06a66 tools/ioemu/vl.c
--- a/tools/ioemu/vl.c  Tue Apr 04 09:39:45 2006 -0600
+++ b/tools/ioemu/vl.c  Tue Apr 04 09:43:41 2006 -0600
@@ -3226,7 +3226,8 @@ int main(int argc, char **argv)
     }
 
     if ( xc_ia64_get_pfn_list(xc_handle, domid,
-                              page_array, IO_PAGE_START >> PAGE_SHIFT, 1) != 1 )
+                              page_array,
+                              ram_pages + (GFW_SIZE >> PAGE_SHIFT), 1) != 1 )
     {
         fprintf(logfile, "xc_ia64_get_pfn_list returned error %d\n", errno);
         exit(-1);
diff -r 0a6f5527ca4b -r 827c65c06a66 tools/libxc/xc_ia64_stubs.c
--- a/tools/libxc/xc_ia64_stubs.c       Tue Apr 04 09:39:45 2006 -0600
+++ b/tools/libxc/xc_ia64_stubs.c       Tue Apr 04 09:43:41 2006 -0600
@@ -101,7 +101,7 @@ int xc_ia64_copy_to_domain_pages(int xc_
         goto error_out;
     }
     if ( xc_ia64_get_pfn_list(xc_handle, domid, page_array,
-                dst_pfn>>PAGE_SHIFT, nr_pages) != nr_pages ){
+                dst_pfn, nr_pages) != nr_pages ){
         PERROR("Could not get the page frame list");
         goto error_out;
     }
@@ -121,10 +121,17 @@ error_out:
 
 
 #define HOB_SIGNATURE 0x3436474953424f48 // "HOBSIG64"
-#define GFW_HOB_START    ((4UL<<30)-(14UL<<20))    //4G -14M
-#define GFW_HOB_SIZE     (1UL<<20)              //1M
-#define MEM_G   (1UL << 30) 
-#define MEM_M   (1UL << 20) 
+#define GFW_HOB_START         ((4UL<<30)-(14UL<<20))    //4G -14M
+#define GFW_HOB_SIZE          (1UL<<20)              //1M
+#define RAW_GFW_START_NR(s)   ((s) >> PAGE_SHIFT)
+#define RAW_GFW_HOB_START_NR(s)                \
+        (RAW_GFW_START_NR(s) + ((GFW_HOB_START - GFW_START) >> PAGE_SHIFT))
+#define RAW_GFW_IMAGE_START_NR(s,i)            \
+        (RAW_GFW_START_NR(s) + (((GFW_SIZE - (i))) >> PAGE_SHIFT))
+#define RAW_IO_PAGE_START_NR(s)                \
+        (RAW_GFW_START_NR(s) + (GFW_SIZE >> PAGE_SHIFT))
+#define RAW_STORE_PAGE_START_NR(s)             \
+        (RAW_IO_PAGE_START_NR(s) + (IO_PAGE_SIZE >> PAGE_SHIFT))
 
 typedef struct {
     unsigned long signature;
@@ -179,7 +186,8 @@ static int add_mem_hob(void* hob_buf, un
 static int add_mem_hob(void* hob_buf, unsigned long dom_mem_size);
 static int build_hob (void* hob_buf, unsigned long hob_buf_size,
                   unsigned long dom_mem_size);
-static int load_hob(int xc_handle,uint32_t dom, void *hob_buf);
+static int load_hob(int xc_handle,uint32_t dom, void *hob_buf,
+               unsigned long dom_mem_size);
 
 int xc_ia64_build_hob(int xc_handle, uint32_t dom, unsigned long memsize){
 
@@ -191,13 +199,13 @@ int xc_ia64_build_hob(int xc_handle, uin
         return -1;
     }
 
-    if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize<<20) < 0){
+    if ( build_hob( hob_buf, GFW_HOB_SIZE, memsize) < 0){
         free (hob_buf);
         PERROR("Could not build hob");
         return -1;
     }
 
-    if ( load_hob( xc_handle, dom, hob_buf) <0){
+    if ( load_hob( xc_handle, dom, hob_buf, memsize) < 0){
         free (hob_buf);
         PERROR("Could not load hob");
        return -1;
@@ -317,7 +325,8 @@ err_out:
 }
 
 static int 
-load_hob(int xc_handle, uint32_t dom, void *hob_buf)
+load_hob(int xc_handle, uint32_t dom, void *hob_buf,
+        unsigned long dom_mem_size)
 {
     // hob_buf should be page aligned
     int hob_size;
@@ -336,7 +345,7 @@ load_hob(int xc_handle, uint32_t dom, vo
     nr_pages = (hob_size + PAGE_SIZE -1) >> PAGE_SHIFT;
     
     return xc_ia64_copy_to_domain_pages(xc_handle, dom,
-            hob_buf, GFW_HOB_START, nr_pages );
+            hob_buf, RAW_GFW_HOB_START_NR(dom_mem_size), nr_pages );
 }
 
 #define MIN(x, y) ((x) < (y)) ? (x) : (y)
@@ -576,13 +585,8 @@ static int setup_guest(  int xc_handle,
     unsigned long page_array[2];
     shared_iopage_t *sp;
     int i;
-
-    // FIXME: initialize pfn list for a temp hack
-    if (xc_ia64_get_pfn_list(xc_handle, dom, NULL, -1, -1) == -1) {
-       PERROR("Could not allocate continuous memory");
-       goto error_out;
-    }
-    
+    unsigned long dom_memsize = (memsize << 20);
+
     if ((image_size > 12 * MEM_M) || (image_size & (PAGE_SIZE - 1))) {
         PERROR("Guest firmware size is incorrect [%ld]?", image_size);
         return -1;
@@ -590,19 +594,21 @@ static int setup_guest(  int xc_handle,
 
     /* Load guest firmware */
     if( xc_ia64_copy_to_domain_pages( xc_handle, dom, 
-            image, 4*MEM_G-image_size, image_size>>PAGE_SHIFT)) {
+            image, RAW_GFW_IMAGE_START_NR(dom_memsize, image_size),
+            image_size>>PAGE_SHIFT)) {
         PERROR("Could not load guest firmware into domain");
         goto error_out;
     }
 
     /* Hand-off state passed to guest firmware */
-    if (xc_ia64_build_hob(xc_handle, dom, memsize) < 0){
+    if (xc_ia64_build_hob(xc_handle, dom, dom_memsize) < 0){
         PERROR("Could not build hob\n");
        goto error_out;
     }
 
     /* Retrieve special pages like io, xenstore, etc. */
-    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array, IO_PAGE_START>>PAGE_SHIFT, 2) != 2 )
+    if ( xc_ia64_get_pfn_list(xc_handle, dom, page_array,
+                               RAW_IO_PAGE_START_NR(dom_memsize), 2) != 2 )
     {
         PERROR("Could not get the page frame list");
         goto error_out;
diff -r 0a6f5527ca4b -r 827c65c06a66 xen/arch/ia64/vmx/vmx_init.c
--- a/xen/arch/ia64/vmx/vmx_init.c      Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/vmx/vmx_init.c      Tue Apr 04 09:43:41 2006 -0600
@@ -327,13 +327,15 @@ io_range_t io_ranges[] = {
 #define VMX_SYS_PAGES  (2 + (GFW_SIZE >> PAGE_SHIFT))
 #define VMX_CONFIG_PAGES(d) ((d)->max_pages - VMX_SYS_PAGES)
 
-int vmx_alloc_contig_pages(struct domain *d)
-{
-       unsigned long i, j, start,tmp, end, pgnr, conf_nr;
-       struct page_info *page;
+int vmx_build_physmap_table(struct domain *d)
+{
+       unsigned long i, j, start, tmp, end, mfn;
        struct vcpu *v = d->vcpu[0];
-
+       struct list_head *list_ent = d->page_list.next;
+
+       ASSERT(!d->arch.physmap_built);
        ASSERT(!test_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags));
+       ASSERT(d->max_pages == d->tot_pages);
 
        /* Mark I/O ranges */
        for (i = 0; i < (sizeof(io_ranges) / sizeof(io_range_t)); i++) {
@@ -343,103 +345,54 @@ int vmx_alloc_contig_pages(struct domain
                assign_domain_page(d, j, io_ranges[i].type);
        }
 
-       conf_nr = VMX_CONFIG_PAGES(d);
-    if((conf_nr<<PAGE_SHIFT)<(1UL<<(_PAGE_SIZE_64M+1)))
-        panic("vti domain needs 128M memory at least\n");
-/*
-       order = get_order_from_pages(conf_nr);
-       if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
-           printk("Could not allocate order=%d pages for vmx contig alloc\n",
-                       order);
-           return -1;
-       }
-*/
- 
-/* reserve contiguous 64M for linux kernel */
-
-    if (unlikely((page = alloc_domheap_pages(d,(KERNEL_TR_PAGE_SHIFT-PAGE_SHIFT), 0)) == NULL)) {
-        printk("No enough memory for vti domain!!!\n");
-        return -1;
-    }
-    pgnr = page_to_mfn(page);
-       for (i=(1UL<<KERNEL_TR_PAGE_SHIFT);i<(1UL<<(KERNEL_TR_PAGE_SHIFT+1));i+=PAGE_SIZE,pgnr++){
-           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-    }
-
-       for (i = 0; i < (1UL<<KERNEL_TR_PAGE_SHIFT) ; i += PAGE_SIZE){
-        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-            printk("No enough memory for vti domain!!!\n");
-            return -1;
-        }
-           pgnr = page_to_mfn(page);
-           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-    }
-
        /* Map normal memory below 3G */
-       end = conf_nr << PAGE_SHIFT;
-    tmp = end < MMIO_START ? end : MMIO_START;
-       for (i = (1UL<<(KERNEL_TR_PAGE_SHIFT+1)); i < tmp; i += PAGE_SIZE){
-        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-            printk("No enough memory for vti domain!!!\n");
-            return -1;
-        }
-           pgnr = page_to_mfn(page);
-           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-    }
+       end = VMX_CONFIG_PAGES(d) << PAGE_SHIFT;
+       tmp = end < MMIO_START ? end : MMIO_START;
+       for (i = 0; (i < tmp) && (list_ent != &d->page_list); i += PAGE_SIZE) {
+           mfn = page_to_mfn(list_entry(
+               list_ent, struct page_info, list));
+           assign_domain_page(d, i, mfn << PAGE_SHIFT);
+           list_ent = mfn_to_page(mfn)->list.next;
+       }
+       ASSERT(list_ent != &d->page_list);
+
        /* Map normal memory beyond 4G */
        if (unlikely(end > MMIO_START)) {
            start = 4 * MEM_G;
            end = start + (end - 3 * MEM_G);
-           for (i = start; i < end; i += PAGE_SIZE){
-            if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-                printk("No enough memory for vti domain!!!\n");
-                return -1;
-            }
-            pgnr = page_to_mfn(page);
-            assign_domain_page(d, i, pgnr << PAGE_SHIFT);
+           for (i = start; (i < end) &&
+                (list_ent != &d->page_list); i += PAGE_SIZE) {
+               mfn = page_to_mfn(list_entry(
+                   list_ent, struct page_info, list));
+               assign_domain_page(d, i, mfn << PAGE_SHIFT);
+               list_ent = mfn_to_page(mfn)->list.next;
+           }
+           ASSERT(list_ent != &d->page_list);
         }
-       }
+        
+       /* Map guest firmware */
+       for (i = GFW_START; (i < GFW_START + GFW_SIZE) &&
+               (list_ent != &d->page_list); i += PAGE_SIZE) {
+           mfn = page_to_mfn(list_entry(
+               list_ent, struct page_info, list));
+           assign_domain_page(d, i, mfn << PAGE_SHIFT);
+           list_ent = mfn_to_page(mfn)->list.next;
+       }
+       ASSERT(list_ent != &d->page_list);
+
+       /* Map for shared I/O page and xenstore */
+       mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+       assign_domain_page(d, IO_PAGE_START, mfn << PAGE_SHIFT);
+       list_ent = mfn_to_page(mfn)->list.next;
+       ASSERT(list_ent != &d->page_list);
+
+       mfn = page_to_mfn(list_entry(list_ent, struct page_info, list));
+       assign_domain_page(d, STORE_PAGE_START, mfn << PAGE_SHIFT);
+       list_ent = mfn_to_page(mfn)->list.next;
+       ASSERT(list_ent == &d->page_list);
 
        d->arch.max_pfn = end >> PAGE_SHIFT;
-/*
-       order = get_order_from_pages(GFW_SIZE >> PAGE_SHIFT);
-       if (unlikely((page = alloc_domheap_pages(d, order, 0)) == NULL)) {
-           printk("Could not allocate order=%d pages for vmx contig alloc\n",
-                       order);
-           return -1;
-       }
-*/
-       /* Map guest firmware */
-       for (i = GFW_START; i < GFW_START + GFW_SIZE; i += PAGE_SIZE, pgnr++){
-        if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-            printk("No enough memory for vti domain!!!\n");
-            return -1;
-        }
-           pgnr = page_to_mfn(page);
-           assign_domain_page(d, i, pgnr << PAGE_SHIFT);
-    }
-
-/*
-       if (unlikely((page = alloc_domheap_pages(d, 1, 0)) == NULL)) {
-           printk("Could not allocate order=1 pages for vmx contig alloc\n");
-           return -1;
-       }
-*/
-       /* Map for shared I/O page and xenstore */
-    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-        printk("No enough memory for vti domain!!!\n");
-        return -1;
-    }
-       pgnr = page_to_mfn(page);
-       assign_domain_page(d, IO_PAGE_START, pgnr << PAGE_SHIFT);
-
-    if (unlikely((page = alloc_domheap_pages(d, 0, 0)) == NULL)) {
-        printk("No enough memory for vti domain!!!\n");
-        return -1;
-    }
-       pgnr = page_to_mfn(page);
-       assign_domain_page(d, STORE_PAGE_START, pgnr << PAGE_SHIFT);
-
+       d->arch.physmap_built = 1;
        set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
        return 0;
 }
@@ -447,6 +400,10 @@ void vmx_setup_platform(struct domain *d
 void vmx_setup_platform(struct domain *d, struct vcpu_guest_context *c)
 {
        ASSERT(d != dom0); /* only for non-privileged vti domain */
+
+       if (!d->arch.physmap_built)
+           vmx_build_physmap_table(d);
+
        d->arch.vmx_platform.shared_page_va =
                (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
        /* TEMP */
diff -r 0a6f5527ca4b -r 827c65c06a66 xen/arch/ia64/xen/dom0_ops.c
--- a/xen/arch/ia64/xen/dom0_ops.c      Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/xen/dom0_ops.c      Tue Apr 04 09:43:41 2006 -0600
@@ -157,40 +157,45 @@ long arch_do_dom0_op(dom0_op_t *op, GUES
      */
     case DOM0_GETMEMLIST:
     {
-        unsigned long i;
+        unsigned long i = 0;
         struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
         unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
         unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
         unsigned long mfn;
+        struct list_head *list_ent;
 
         ret = -EINVAL;
         if ( d != NULL )
         {
             ret = 0;
 
-            /* A temp trick here. When max_pfns == -1, we assume
-             * the request is for  machine contiguous pages, so request
-             * all pages at first query
-             */
-            if ( (op->u.getmemlist.max_pfns == -1UL) &&
-                 !test_bit(ARCH_VMX_CONTIG_MEM,
-                           &d->vcpu[0]->arch.arch_vmx.flags) ) {
-                ret = (long) vmx_alloc_contig_pages(d);
-                put_domain(d);
-                return ret ? (-ENOMEM) : 0;
-            }
-
-            for ( i = start_page; i < (start_page + nr_pages); i++ )
-            {
-                mfn = gmfn_to_mfn_foreign(d, i);
-
-                if ( copy_to_guest_offset(op->u.getmemlist.buffer,
+            list_ent = d->page_list.next;
+            while ( (i != start_page) && (list_ent != &d->page_list)) {
+                mfn = page_to_mfn(list_entry(
+                    list_ent, struct page_info, list));
+                i++;
+                list_ent = mfn_to_page(mfn)->list.next;
+            }
+
+            if (i == start_page)
+            {
+                while((i < (start_page + nr_pages)) &&
+                      (list_ent != &d->page_list))
+                {
+                    mfn = page_to_mfn(list_entry(
+                        list_ent, struct page_info, list));
+
+                    if ( copy_to_guest_offset(op->u.getmemlist.buffer,
                                           i - start_page, &mfn, 1) )
-                {
-                    ret = -EFAULT;
-                    break;
-                }
-            }
+                    {
+                        ret = -EFAULT;
+                        break;
+                    }
+                    i++;
+                    list_ent = mfn_to_page(mfn)->list.next;
+                }
+            } else
+                ret = -ENOMEM;
 
             op->u.getmemlist.num_pfns = i - start_page;
             copy_to_guest(u_dom0_op, op, 1);
diff -r 0a6f5527ca4b -r 827c65c06a66 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/xen/domain.c        Tue Apr 04 09:43:41 2006 -0600
@@ -76,6 +76,7 @@ extern void serial_input_init(void);
 extern void serial_input_init(void);
 
 static void init_switch_stack(struct vcpu *v);
+void build_physmap_table(struct domain *d);
 
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
@@ -272,6 +273,7 @@ int arch_domain_create(struct domain *d)
        memset(d->arch.mm, 0, sizeof(*d->arch.mm));
        INIT_LIST_HEAD(&d->arch.mm->pt_list);
 
+       d->arch.physmap_built = 0;
        if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
            goto fail_nomem;
 
@@ -317,7 +319,8 @@ int arch_set_info_guest(struct vcpu *v, 
                vmx_setup_platform(d, c);
 
            vmx_final_setup_guest(v);
-       }
+       } else if (!d->arch.physmap_built)
+           build_physmap_table(d);
 
        *regs = c->regs;
        if (v == d->vcpu[0]) {
@@ -583,44 +586,24 @@ void assign_domain_page(struct domain *d
         *(mpt_table + (physaddr>>PAGE_SHIFT))=(mpaddr>>PAGE_SHIFT);
     }
 }
-#if 0
-/* map a physical address with specified I/O flag */
-void assign_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags)
-{
-       struct mm_struct *mm = d->arch.mm;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t io_pte;
-
-       if (!mm->pgd) {
-               printk("assign_domain_page: domain pgd must exist!\n");
-               return;
-       }
-       ASSERT(flags & GPFN_IO_MASK);
-
-       pgd = pgd_offset(mm,mpaddr);
-       if (pgd_none(*pgd))
-               pgd_populate(mm, pgd, pud_alloc_one(mm,mpaddr));
-
-       pud = pud_offset(pgd, mpaddr);
-       if (pud_none(*pud))
-               pud_populate(mm, pud, pmd_alloc_one(mm,mpaddr));
-
-       pmd = pmd_offset(pud, mpaddr);
-       if (pmd_none(*pmd))
-               pmd_populate_kernel(mm, pmd, pte_alloc_one_kernel(mm,mpaddr));
-//             pmd_populate(mm, pmd, pte_alloc_one(mm,mpaddr));
-
-       pte = pte_offset_map(pmd, mpaddr);
-       if (pte_none(*pte)) {
-               pte_val(io_pte) = flags;
-               set_pte(pte, io_pte);
-       }
-       else printk("assign_domain_page: mpaddr %lx already mapped!\n",mpaddr);
-}
-#endif
+
+void build_physmap_table(struct domain *d)
+{
+       struct list_head *list_ent = d->page_list.next;
+       unsigned long mfn, i = 0;
+
+       ASSERT(!d->arch.physmap_built);
+       while(list_ent != &d->page_list) {
+           mfn = page_to_mfn(list_entry(
+               list_ent, struct page_info, list));
+           assign_domain_page(d, i << PAGE_SHIFT, mfn << PAGE_SHIFT);
+
+           i++;
+           list_ent = mfn_to_page(mfn)->list.next;
+       }
+       d->arch.physmap_built = 1;
+}
+
 void mpafoo(unsigned long mpaddr)
 {
        extern unsigned long privop_trace;
@@ -650,7 +633,6 @@ unsigned long lookup_domain_mpa(struct d
                return *(unsigned long *)pte;
        }
 #endif
-tryagain:
        if (pgd_present(*pgd)) {
                pud = pud_offset(pgd,mpaddr);
                if (pud_present(*pud)) {
@@ -665,12 +647,12 @@ tryagain:
                        }
                }
        }
-       /* if lookup fails and mpaddr is "legal", "create" the page */
        if ((mpaddr >> PAGE_SHIFT) < d->max_pages) {
-               if (assign_new_domain_page(d,mpaddr)) goto tryagain;
-       }
-       printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
-               mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+               printk("lookup_domain_mpa: non-allocated mpa 0x%lx (< 0x%lx)\n",
+                       mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
+       } else
+               printk("lookup_domain_mpa: bad mpa 0x%lx (> 0x%lx)\n",
+                       mpaddr, (unsigned long) d->max_pages<<PAGE_SHIFT);
        mpafoo(mpaddr);
        return 0;
 }
diff -r 0a6f5527ca4b -r 827c65c06a66 xen/arch/ia64/xen/hypercall.c
--- a/xen/arch/ia64/xen/hypercall.c     Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/arch/ia64/xen/hypercall.c     Tue Apr 04 09:43:41 2006 -0600
@@ -76,25 +76,8 @@ xen_hypercall (struct pt_regs *regs)
                break;
 
            case __HYPERVISOR_memory_op:
-               /* we don't handle reservations; just return success */
-               {
-                   struct xen_memory_reservation reservation;
-                   void *arg = (void *) regs->r15;
-
-                   switch(regs->r14) {
-                   case XENMEM_increase_reservation:
-                   case XENMEM_decrease_reservation:
-                       if (copy_from_user(&reservation, arg,
-                               sizeof(reservation)))
-                           regs->r8 = -EFAULT;
-                       else
-                           regs->r8 = reservation.nr_extents;
-                       break;
-                   default:
-                       regs->r8 = do_memory_op((int) regs->r14, guest_handle_from_ptr(regs->r15, void));
-                       break;
-                   }
-               }
+               regs->r8 = do_memory_op(regs->r14,
+                       guest_handle_from_ptr(regs->r15, void));
                break;
 
            case __HYPERVISOR_event_channel_op:
@@ -102,19 +85,24 @@ xen_hypercall (struct pt_regs *regs)
                break;
 
            case __HYPERVISOR_grant_table_op:
-               regs->r8 = do_grant_table_op((unsigned int) regs->r14, guest_handle_from_ptr(regs->r15, void), (unsigned int) regs->r16);
+               regs->r8 = do_grant_table_op((unsigned int) regs->r14,
+                       guest_handle_from_ptr(regs->r15, void),
+                       (unsigned int) regs->r16);
                break;
 
            case __HYPERVISOR_console_io:
-               regs->r8 = do_console_io((int) regs->r14, (int) regs->r15, guest_handle_from_ptr(regs->r16, char));
+               regs->r8 = do_console_io((int) regs->r14, (int) regs->r15,
+                       guest_handle_from_ptr(regs->r16, char));
                break;
 
            case __HYPERVISOR_xen_version:
-               regs->r8 = do_xen_version((int) regs->r14, guest_handle_from_ptr(regs->r15, void));
+               regs->r8 = do_xen_version((int) regs->r14,
+                       guest_handle_from_ptr(regs->r15, void));
                break;
 
            case __HYPERVISOR_multicall:
-               regs->r8 = do_multicall(guest_handle_from_ptr(regs->r14, multicall_entry_t), (unsigned int) regs->r15);
+               regs->r8 = do_multicall(guest_handle_from_ptr(regs->r14,
+                       multicall_entry_t), (unsigned int) regs->r15);
                break;
 
            default:
diff -r 0a6f5527ca4b -r 827c65c06a66 xen/include/asm-ia64/domain.h
--- a/xen/include/asm-ia64/domain.h     Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/include/asm-ia64/domain.h     Tue Apr 04 09:43:41 2006 -0600
@@ -27,6 +27,7 @@ struct arch_domain {
     int rid_bits;              /* number of virtual rid bits (default: 18) */
     int breakimm;
 
+    int physmap_built;         /* Whether the physmap table has been built */
     int imp_va_msb;
     /* System pages out of guest memory, like for xenstore/console */
     unsigned long sys_pgnr;
diff -r 0a6f5527ca4b -r 827c65c06a66 xen/include/asm-ia64/vmx.h
--- a/xen/include/asm-ia64/vmx.h        Tue Apr 04 09:39:45 2006 -0600
+++ b/xen/include/asm-ia64/vmx.h        Tue Apr 04 09:43:41 2006 -0600
@@ -40,7 +40,7 @@ extern void vmx_save_state(struct vcpu *
 extern void vmx_save_state(struct vcpu *v);
 extern void vmx_load_state(struct vcpu *v);
 extern void show_registers(struct pt_regs *regs);
-extern int vmx_alloc_contig_pages(struct domain *d);
+extern int vmx_build_physmap_table(struct domain *d);
 extern unsigned long __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn);
 extern void sync_split_caches(void);
 extern void vmx_virq_line_assist(struct vcpu *v);
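
A note on the DOM0_GETMEMLIST hunk above: the handler now unpacks a
start page from the upper 32 bits of max_pfns and a page count from the
lower 32 bits, and walks d->page_list instead of allocating. A dom0
caller would therefore encode a query roughly as follows (a sketch using
Xen 3.0-era dom0_op field names; the surrounding variables are
hypothetical):

    dom0_op_t op;

    op.cmd = DOM0_GETMEMLIST;
    op.u.getmemlist.domain = domid;
    /* upper 32 bits: first page index; lower 32 bits: number of pages */
    op.u.getmemlist.max_pfns = ((uint64_t)start_page << 32) | nr_pages;
    set_xen_guest_handle(op.u.getmemlist.buffer, pfn_buf);
    /* on success, op.u.getmemlist.num_pfns reports how many were copied */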



 

