
[Xen-changelog] Various fixes for multi-domain prep



ChangeSet 1.1709.1.3, 2005/06/13 16:03:21-06:00, djm@xxxxxxxxxxxxxxxxxx

        Various fixes for multi-domain prep
        
        Signed-off-by: Matthew Chapman <matthewc@xxxxxx>



 arch/ia64/dom0_ops.c                   |   58 ++++++--
 arch/ia64/domain.c                     |  220 ++++++++++++++-------------------
 arch/ia64/hypercall.c                  |   24 +++
 arch/ia64/patch/linux-2.6.11/uaccess.h |   15 ++
 arch/ia64/tools/mkbuildtree            |    2 
 arch/ia64/vcpu.c                       |    6 
 arch/ia64/xenmisc.c                    |    8 -
 include/asm-ia64/config.h              |    5 
 include/asm-ia64/domain.h              |    3 
 include/public/arch-ia64.h             |   11 +
 10 files changed, 196 insertions(+), 156 deletions(-)


diff -Nru a/xen/arch/ia64/dom0_ops.c b/xen/arch/ia64/dom0_ops.c
--- a/xen/arch/ia64/dom0_ops.c  2005-06-19 14:03:47 -04:00
+++ b/xen/arch/ia64/dom0_ops.c  2005-06-19 14:03:47 -04:00
@@ -18,14 +18,6 @@
 #include <xen/console.h>
 #include <public/sched_ctl.h>
 
-#define TRC_DOM0OP_ENTER_BASE  0x00020000
-#define TRC_DOM0OP_LEAVE_BASE  0x00030000
-
-static int msr_cpu_mask;
-static unsigned long msr_addr;
-static unsigned long msr_lo;
-static unsigned long msr_hi;
-
 long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op)
 {
     long ret = 0;
@@ -35,6 +27,49 @@
 
     switch ( op->cmd )
     {
+    /*
+     * NOTE: DOM0_GETMEMLIST has somewhat different semantics on IA64 -
+     * it actually allocates and maps pages.
+     */
+    case DOM0_GETMEMLIST:
+    {
+        unsigned long i;
+        struct domain *d = find_domain_by_id(op->u.getmemlist.domain);
+        unsigned long start_page = op->u.getmemlist.max_pfns >> 32;
+        unsigned long nr_pages = op->u.getmemlist.max_pfns & 0xffffffff;
+        unsigned long pfn;
+        unsigned long *buffer = op->u.getmemlist.buffer;
+        struct page *page;
+
+        ret = -EINVAL;
+        if ( d != NULL )
+        {
+            ret = 0;
+
+            for ( i = start_page; i < (start_page + nr_pages); i++ )
+            {
+                page = map_new_domain_page(d, i << PAGE_SHIFT);
+                if ( page == NULL )
+                {
+                    ret = -ENOMEM;
+                    break;
+                }
+                pfn = page_to_pfn(page);
+                if ( put_user(pfn, buffer) )
+                {
+                    ret = -EFAULT;
+                    break;
+                }
+                buffer++;
+            }
+
+            op->u.getmemlist.num_pfns = i - start_page;
+            copy_to_user(u_dom0_op, op, sizeof(*op));
+            
+            put_domain(d);
+        }
+    }
+    break;
 
     default:
         ret = -ENOSYS;
@@ -42,11 +77,4 @@
     }
 
     return ret;
-}
-
-void arch_getdomaininfo_ctxt(struct domain *d, struct vcpu_guest_context *c)
-{ 
-    int i;
-
-       dummy();
 }
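
(For reference, a minimal standalone sketch, not part of this changeset, of the argument packing the ia64 DOM0_GETMEMLIST now expects: the caller puts the starting page number in the upper 32 bits of max_pfns and the page count in the lower 32 bits, and arch_do_dom0_op() above unpacks them the same way. The values below are illustrative only.)

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        /* illustrative values only */
        unsigned long start_page = 0x1000;
        unsigned long nr_pages   = 256;

        /* caller-side packing into getmemlist.max_pfns (longs are 64-bit on ia64) */
        unsigned long max_pfns = (start_page << 32) | (nr_pages & 0xffffffffUL);

        /* hypervisor-side unpacking, exactly as in arch_do_dom0_op() above */
        assert((max_pfns >> 32) == start_page);
        assert((max_pfns & 0xffffffff) == nr_pages);

        printf("max_pfns = 0x%lx\n", max_pfns);
        return 0;
    }
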
diff -Nru a/xen/arch/ia64/domain.c b/xen/arch/ia64/domain.c
--- a/xen/arch/ia64/domain.c    2005-06-19 14:03:47 -04:00
+++ b/xen/arch/ia64/domain.c    2005-06-19 14:03:47 -04:00
@@ -76,7 +76,7 @@
 /* this belongs in include/asm, but there doesn't seem to be a suitable place */
 void free_perdomain_pt(struct domain *d)
 {
-       dummy();
+       printf("free_perdomain_pt: not implemented\n");
        //free_page((unsigned long)d->mm.perdomain_pt);
 }
 
@@ -166,12 +166,34 @@
        free_xenheap_pages(v, KERNEL_STACK_SIZE_ORDER);
 }
 
+static void init_switch_stack(struct vcpu *v)
+{
+       struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+       struct switch_stack *sw = (struct switch_stack *) regs - 1;
+       extern void ia64_ret_from_clone;
+
+       memset(sw, 0, sizeof(struct switch_stack) + sizeof(struct pt_regs));
+       sw->ar_bspstore = (unsigned long)v + IA64_RBS_OFFSET;
+       sw->b0 = (unsigned long) &ia64_ret_from_clone;
+       sw->ar_fpsr = FPSR_DEFAULT;
+       v->arch._thread.ksp = (unsigned long) sw - 16;
+       // stay on kernel stack because may get interrupts!
+       // ia64_ret_from_clone (which b0 gets in new_thread) switches
+       // to user stack
+       v->arch._thread.on_ustack = 0;
+       memset(v->arch._thread.fph,0,sizeof(struct ia64_fpreg)*96);
+}
+
 #ifdef CONFIG_VTI
 void arch_do_createdomain(struct vcpu *v)
 {
        struct domain *d = v->domain;
        struct thread_info *ti = alloc_thread_info(v);
 
+       /* Clear thread_info to clear some important fields, like preempt_count */
+       memset(ti, 0, sizeof(struct thread_info));
+       init_switch_stack(v);
+
        /* If domain is VMX domain, shared info area is created
         * by domain and then domain notifies HV by specific hypercall.
         * If domain is xenolinux, shared info area is created by
@@ -194,9 +216,6 @@
        }
        memset(v->vcpu_info, 0, PAGE_SIZE);
 
-       /* Clear thread_info to clear some important fields, like preempt_count */
-       memset(ti, 0, sizeof(struct thread_info));
-
        /* Allocate per-domain vTLB and vhpt */
        v->arch.vtlb = init_domain_tlb(v);
 
@@ -212,37 +231,37 @@
        d->xen_vaend = 0xf300000000000000;
        d->arch.breakimm = 0x1000;
 
-       // stay on kernel stack because may get interrupts!
-       // ia64_ret_from_clone (which b0 gets in new_thread) switches
-       // to user stack
-       v->arch._thread.on_ustack = 0;
+       d->arch.mm = xmalloc(struct mm_struct);
+       if (unlikely(!d->arch.mm)) {
+               printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+               return -ENOMEM;
+       }
+       memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+       d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+       if (unlikely(!d->arch.mm->pgd)) {
+               printk("Can't allocate pgd for domain %d\n",d->domain_id);
+               return -ENOMEM;
+       }
+}
 }
 #else // CONFIG_VTI
 void arch_do_createdomain(struct vcpu *v)
 {
        struct domain *d = v->domain;
+       struct thread_info *ti = alloc_thread_info(v);
+
+       /* Clear thread_info to clear some important fields, like preempt_count */
+       memset(ti, 0, sizeof(struct thread_info));
+       init_switch_stack(v);
 
        d->shared_info = (void *)alloc_xenheap_page();
-       v->vcpu_info = (void *)alloc_xenheap_page();
-       if (!v->vcpu_info) {
+       if (!d->shared_info) {
                printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
                while (1);
        }
-       memset(v->vcpu_info, 0, PAGE_SIZE);
-       /* pin mapping */
-       // FIXME: Does this belong here?  Or do only at domain switch time?
-#if 0
-       // this is now done in ia64_new_rr7
-       {
-               /* WARNING: following must be inlined to avoid nested fault */
-               unsigned long psr = ia64_clear_ic();
-               ia64_itr(0x2, IA64_TR_SHARED_INFO, SHAREDINFO_ADDR,
-                pte_val(pfn_pte(ia64_tpa(d->shared_info) >> PAGE_SHIFT, PAGE_KERNEL)),
-                PAGE_SHIFT);
-               ia64_set_psr(psr);
-               ia64_srlz_i();
-       }
-#endif
+       memset(d->shared_info, 0, PAGE_SIZE);
+       v->vcpu_info = &(d->shared_info->vcpu_data[0]);
+
        d->max_pages = (128*1024*1024)/PAGE_SIZE; // 128MB default // FIXME
        if ((d->arch.metaphysical_rr0 = allocate_metaphysical_rr0()) == -1UL)
                BUG();
@@ -258,33 +277,63 @@
        d->shared_info_va = 0xf100000000000000;
        d->arch.breakimm = 0x1000;
        v->arch.breakimm = d->arch.breakimm;
-       // stay on kernel stack because may get interrupts!
-       // ia64_ret_from_clone (which b0 gets in new_thread) switches
-       // to user stack
-       v->arch._thread.on_ustack = 0;
+
+       d->arch.mm = xmalloc(struct mm_struct);
+       if (unlikely(!d->arch.mm)) {
+               printk("Can't allocate mm_struct for domain %d\n",d->domain_id);
+               return -ENOMEM;
+       }
+       memset(d->arch.mm, 0, sizeof(*d->arch.mm));
+       d->arch.mm->pgd = pgd_alloc(d->arch.mm);
+       if (unlikely(!d->arch.mm->pgd)) {
+               printk("Can't allocate pgd for domain %d\n",d->domain_id);
+               return -ENOMEM;
+       }
 }
 #endif // CONFIG_VTI
 
-void arch_do_boot_vcpu(struct vcpu *v)
+void arch_getdomaininfo_ctxt(struct vcpu *v, struct vcpu_guest_context *c)
 {
-       return;
+       struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+
+       printf("arch_getdomaininfo_ctxt\n");
+       c->regs = *regs;
+       c->vcpu = v->vcpu_info->arch;
+       c->shared = v->domain->shared_info->arch;
 }
 
 int arch_set_info_guest(struct vcpu *v, struct vcpu_guest_context *c)
 {
-       dummy();
-       return 1;
+       struct pt_regs *regs = (struct pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+
+       printf("arch_set_info_guest\n");
+       *regs = c->regs;
+       regs->cr_ipsr = IA64_PSR_IT|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_IC|IA64_PSR_I|IA64_PSR_DFH|IA64_PSR_BN|IA64_PSR_SP|IA64_PSR_DI;
+       regs->cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;
+       regs->ar_rsc |= (2 << 2); /* force PL2/3 */
+
+       v->vcpu_info->arch = c->vcpu;
+       init_all_rr(v);
+
+       // this should be in userspace
+       regs->r28 = dom_fw_setup(v->domain,"nomca nosmp xencons=ttyS console=ttyS0",256L);  //FIXME
+       v->vcpu_info->arch.banknum = 1;
+       v->vcpu_info->arch.metaphysical_mode = 1;

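
(As a side note, a small standalone sketch, not part of this changeset, of the privilege forcing arch_set_info_guest() applies above: whatever psr image the guest context supplies, psr.cpl is forced to 2 and rsc.pl to 2, so the guest kernel never runs at ring 0 and RSE spills go to the PL2/3 backing store. The IA64_PSR_CPL0_BIT value is assumed from the Linux/ia64 headers.)

    #include <stdio.h>

    #define IA64_PSR_CPL0_BIT 32   /* assumed: psr.cpl occupies bits 32-33 on ia64 */

    int main(void)
    {
        unsigned long cr_ipsr = 0;              /* start from a clean psr image      */
        cr_ipsr |= 2UL << IA64_PSR_CPL0_BIT;    /* force cpl = 2, as the patch does  */

        unsigned long ar_rsc = 0;
        ar_rsc |= (2 << 2);                     /* rsc.pl = 2: force PL2/3           */

        printf("cpl = %lu, rsc.pl = %lu\n",
               (cr_ipsr >> IA64_PSR_CPL0_BIT) & 3UL,
               (ar_rsc >> 2) & 3UL);
        return 0;
    }
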
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
