[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] Complete arch_domain_create refactoring for ia64.



# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID a9ead230cc6038dd4905574b70328d3889c22c03
# Parent  d783bdd14f2eff514e36b92ded0e3d22be7cf3d0
Complete arch_domain_create refactoring for ia64.

Signed-off-by: Kevin Tian <kevin.tian@xxxxxxxxx>

diff -r d783bdd14f2e -r a9ead230cc60 xen/arch/ia64/xen/domain.c
--- a/xen/arch/ia64/xen/domain.c        Sat Jan 14 22:40:09 2006
+++ b/xen/arch/ia64/xen/domain.c        Mon Jan 16 13:47:31 2006
@@ -65,6 +65,7 @@
 
 unsigned long map_domain_page0(struct domain *);
 extern unsigned long dom_fw_setup(struct domain *, char *, int);
+static void init_switch_stack(struct vcpu *v);
 
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void arch_domain_destroy(struct domain *d)
@@ -145,27 +146,45 @@
 struct vcpu *alloc_vcpu_struct(struct domain *d, unsigned int vcpu_id)
 {
        struct vcpu *v;
+       struct thread_info *ti;
 
        /* Still keep idle vcpu0 static allocated at compilation, due
         * to some code from Linux still requires it in early phase.
         */
        if (is_idle_domain(d) && !vcpu_id)
-               return idle_vcpu[0];
-
-       if ((v = alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER)) == NULL)
+           v = idle_vcpu[0];
+       else {
+           if ((v = alloc_xenheap_pages(KERNEL_STACK_SIZE_ORDER)) == NULL)
                return NULL;
-
-       memset(v, 0, sizeof(*v)); 
-        memcpy(&v->arch, &idle_vcpu[0]->arch, sizeof(v->arch));
+           memset(v, 0, sizeof(*v)); 
+       }
+
+       ti = alloc_thread_info(v);
+       /* Clear thread_info to clear some important fields, like
+        * preempt_count
+        */
+       memset(ti, 0, sizeof(struct thread_info));
+       init_switch_stack(v);
 
        if (!is_idle_domain(d)) {
            v->arch.privregs = 
                alloc_xenheap_pages(get_order(sizeof(mapped_regs_t)));
            BUG_ON(v->arch.privregs == NULL);
            memset(v->arch.privregs, 0, PAGE_SIZE);
-       }
-
-       printf("arch_vcpu_info=%p\n", v->arch.privregs);
+
+           if (!vcpu_id)
+               memset(&d->shared_info->evtchn_mask[0], 0xff,
+                   sizeof(d->shared_info->evtchn_mask));
+
+           v->vcpu_info = &(d->shared_info->vcpu_info[0]);
+           v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
+           v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
+           v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
+           v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
+           v->arch.starting_rid = d->arch.starting_rid;
+           v->arch.ending_rid = d->arch.ending_rid;
+           v->arch.breakimm = d->arch.breakimm;
+       }
 
        return v;
 }
@@ -195,34 +214,19 @@
 
 int arch_domain_create(struct domain *d)
 {
-       struct thread_info *ti = alloc_thread_info(v);
-
-       /* Clear thread_info to clear some important fields, like preempt_count */
-       memset(ti, 0, sizeof(struct thread_info));
-       init_switch_stack(v);
-
        // the following will eventually need to be negotiated dynamically
        d->xen_vastart = XEN_START_ADDR;
        d->xen_vaend = XEN_END_ADDR;
        d->shared_info_va = SHAREDINFO_ADDR;
 
-       if (is_idle_vcpu(v))
+       if (is_idle_domain(d))
            return 0;
 
-       d->shared_info = (void *)alloc_xenheap_page();
-       if (!d->shared_info) {
-               printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
-               while (1);
-       }
+       if ((d->shared_info = (void *)alloc_xenheap_page()) == NULL)
+           goto fail_nomem;
        memset(d->shared_info, 0, PAGE_SIZE);
-       if (v == d->vcpu[0])
-           memset(&d->shared_info->evtchn_mask[0], 0xff,
-               sizeof(d->shared_info->evtchn_mask));
-
-       v->vcpu_info = &(d->shared_info->vcpu_info[0]);
 
        d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
-
        /* We may also need emulation rid for region4, though it's unlikely
         * to see guest issue uncacheable access in metaphysical mode. But
         * keep such info here may be more sane.
@@ -230,34 +234,27 @@
        if (((d->arch.metaphysical_rr0 = allocate_metaphysical_rr()) == -1UL)
         || ((d->arch.metaphysical_rr4 = allocate_metaphysical_rr()) == -1UL))
                BUG();
-//     VCPU(v, metaphysical_mode) = 1;
-       v->arch.metaphysical_rr0 = d->arch.metaphysical_rr0;
-       v->arch.metaphysical_rr4 = d->arch.metaphysical_rr4;
-       v->arch.metaphysical_saved_rr0 = d->arch.metaphysical_rr0;
-       v->arch.metaphysical_saved_rr4 = d->arch.metaphysical_rr4;
 #define DOMAIN_RID_BITS_DEFAULT 18
        if (!allocate_rid_range(d,DOMAIN_RID_BITS_DEFAULT)) // FIXME
                BUG();
-       v->arch.starting_rid = d->arch.starting_rid;
-       v->arch.ending_rid = d->arch.ending_rid;
        d->arch.breakimm = 0x1000;
-       v->arch.breakimm = d->arch.breakimm;
-
        d->arch.sys_pgnr = 0;
-       d->arch.mm = xmalloc(struct mm_struct);
-       if (unlikely(!d->arch.mm)) {
-               printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
-               return -ENOMEM;
-       }
+
+       if ((d->arch.mm = xmalloc(struct mm_struct)) == NULL)
+           goto fail_nomem;
        memset(d->arch.mm, 0, sizeof(*d->arch.mm));
-       d->arch.mm->pgd = pgd_alloc(d->arch.mm);
-       if (unlikely(!d->arch.mm->pgd)) {
-               printk("Can't allocate pgd for domain %d\n",d->domain_id);
-               return -ENOMEM;
-       }
+
+       if ((d->arch.mm->pgd = pgd_alloc(d->arch.mm)) == NULL)
+           goto fail_nomem;
+
        printf ("arch_domain_create: domain=%p\n", d);
-
        return 0;
+
+fail_nomem:
+       if (d->arch.mm != NULL)
+           pgd_free(d->arch.mm->pgd);
+       xfree(d->arch.mm);
+       free_xenheap_page(d->shared_info);
+       return -ENOMEM;
 }
 
 void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.