
[Xen-changelog] [xen-unstable] [HVM] HVM is now a flag parameter to domain-creation hypercall.



# HG changeset patch
# User kfraser@xxxxxxxxxxxxxxxxxxxxx
# Node ID 7b5115221dfc1c139a8459cc8e95341e7b880ea6
# Parent  f3f492ba8e8d630175415ba27df2a48e9e43913d
[HVM] HVM is now a flag parameter to domain-creation hypercall.
This cleans up HVM start-of-day in Xen and means that the
HVM status of a domain is maintained from cradle to grave.
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
---
 tools/libxc/xc_domain.c                 |    2 +
 tools/libxc/xc_hvm_build.c              |   15 ++++++++
 tools/libxc/xenctrl.h                   |    1 
 tools/python/xen/lowlevel/xc/xc.c       |   19 ++++++-----
 tools/python/xen/xend/XendDomainInfo.py |   12 ++++++-
 xen/arch/ia64/xen/xensetup.c            |    6 +--
 xen/arch/powerpc/mm.c                   |    6 +--
 xen/arch/powerpc/papr/xlate.c           |    2 -
 xen/arch/powerpc/setup.c                |    9 ++---
 xen/arch/x86/domain.c                   |   31 +++++++++++-------
 xen/arch/x86/domctl.c                   |   12 +++----
 xen/arch/x86/hvm/hvm.c                  |   10 ++---
 xen/arch/x86/hvm/svm/svm.c              |    1 
 xen/arch/x86/hvm/svm/vmcb.c             |   18 +++-------
 xen/arch/x86/hvm/vmx/vmcs.c             |   20 ++++-------
 xen/arch/x86/hvm/vmx/vmx.c              |    2 -
 xen/arch/x86/mm.c                       |    2 -
 xen/arch/x86/mm/shadow/common.c         |    4 +-
 xen/arch/x86/mm/shadow/multi.c          |   34 ++++++++++----------
 xen/arch/x86/mm/shadow/types.h          |   10 ++---
 xen/arch/x86/oprofile/xenoprof.c        |    2 -
 xen/arch/x86/setup.c                    |    9 ++---
 xen/arch/x86/traps.c                    |    2 -
 xen/arch/x86/x86_32/domain_page.c       |    2 -
 xen/arch/x86/x86_32/traps.c             |    4 +-
 xen/arch/x86/x86_64/traps.c             |    6 +--
 xen/common/domain.c                     |    7 ++--
 xen/common/domctl.c                     |   16 +++++----
 xen/include/asm-x86/hvm/support.h       |    3 +
 xen/include/asm-x86/processor.h         |    2 -
 xen/include/asm-x86/regs.h              |    2 -
 xen/include/asm-x86/shadow.h            |    4 +-
 xen/include/public/arch-x86_64.h        |    6 ++-
 xen/include/public/domctl.h             |    5 ++
 xen/include/xen/sched.h                 |   54 +++++++++++++++++---------------
 35 files changed, 192 insertions(+), 148 deletions(-)
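
As context for the libxc changes below, here is a minimal sketch of how a C caller creates an HVM domain through the extended interface. The xc_domain_create() prototype and the XEN_DOMCTL_CDF_hvm_guest flag are taken from this patch; the helper name, zeroed handle and error handling are purely illustrative.

    #include <stdint.h>
    #include <string.h>
    #include <xenctrl.h>

    /* Sketch: create a domain that is HVM from the moment it exists. */
    static int create_hvm_domain(int xc_handle, uint32_t ssidref,
                                 uint32_t *pdomid)
    {
        xen_domain_handle_t handle;

        memset(handle, 0, sizeof(handle));      /* illustrative UUID */

        /* XEN_DOMCTL_CDF_hvm_guest marks the domain as HVM at creation,
         * so its HVM status is known from cradle to grave. */
        return xc_domain_create(xc_handle, ssidref, handle,
                                XEN_DOMCTL_CDF_hvm_guest, pdomid);
    }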

diff -r f3f492ba8e8d -r 7b5115221dfc tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/libxc/xc_domain.c   Wed Nov 01 16:08:19 2006 +0000
@@ -12,6 +12,7 @@ int xc_domain_create(int xc_handle,
 int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
+                     uint32_t flags,
                      uint32_t *pdomid)
 {
     int err;
@@ -20,6 +21,7 @@ int xc_domain_create(int xc_handle,
     domctl.cmd = XEN_DOMCTL_createdomain;
     domctl.domain = (domid_t)*pdomid;
     domctl.u.createdomain.ssidref = ssidref;
+    domctl.u.createdomain.flags   = flags;
     memcpy(domctl.u.createdomain.handle, handle, sizeof(xen_domain_handle_t));
     if ( (err = do_domctl(xc_handle, &domctl)) != 0 )
         return err;
diff -r f3f492ba8e8d -r 7b5115221dfc tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c        Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/libxc/xc_hvm_build.c        Wed Nov 01 16:08:19 2006 +0000
@@ -261,6 +261,19 @@ static int setup_guest(int xc_handle,
         goto error_out;
     }
 
+    /* HVM domains must be put into shadow mode at the start of day. */
+    /* XXX *After* xc_get_pfn_list()!! */
+    if ( xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
+                           NULL, 0, NULL, 
+                           XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  |
+                           XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
+                           XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL, 
+                           NULL) )
+    {
+        PERROR("Could not enable shadow paging for domain.\n");
+        goto error_out;
+    }        
+
     loadelfimage(image, xc_handle, dom, page_array, &dsi);
 
     if ( (mmu = xc_init_mmu_updates(xc_handle, dom)) == NULL )
@@ -417,6 +430,7 @@ static int xc_hvm_build_internal(int xc_
         goto error_out;
     }
 
+#if 0
     /* HVM domains must be put into shadow mode at the start of day */
     if ( xc_shadow_control(xc_handle, domid, XEN_DOMCTL_SHADOW_OP_ENABLE,
                            NULL, 0, NULL, 
@@ -428,6 +442,7 @@ static int xc_hvm_build_internal(int xc_
         PERROR("Could not enable shadow paging for domain.\n");
         goto error_out;
     }        
+#endif
 
     memset(ctxt, 0, sizeof(*ctxt));
 
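The hunk above moves shadow enabling into setup_guest(), after xc_get_pfn_list(), because once translated/external shadow mode is active the domain's memory is reached through its P2M; the old call site in xc_hvm_build_internal() is kept under #if 0 for now. Below is a hedged sketch of the required ordering: the xc_shadow_control() call is copied from the hunk, while the xc_get_pfn_list() prototype is assumed to match this tree's xenctrl.h, and the buffer sizing and error handling are illustrative.

    #include <stdint.h>
    #include <xenctrl.h>

    /* Sketch: the PFN list must be fetched *before* the domain is put
     * into refcounted, translated, external shadow mode. */
    static int enable_hvm_shadow(int xc_handle, uint32_t dom,
                                 xen_pfn_t *page_array,
                                 unsigned long nr_pages)
    {
        /* 1. Query the physical->machine layout first. */
        if ( xc_get_pfn_list(xc_handle, dom, page_array, nr_pages) !=
             (int)nr_pages )
            return -1;

        /* 2. Only now enable shadow paging for the HVM domain. */
        return xc_shadow_control(xc_handle, dom, XEN_DOMCTL_SHADOW_OP_ENABLE,
                                 NULL, 0, NULL,
                                 XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT  |
                                 XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE |
                                 XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL,
                                 NULL);
    }
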
diff -r f3f492ba8e8d -r 7b5115221dfc tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/libxc/xenctrl.h     Wed Nov 01 16:08:19 2006 +0000
@@ -177,6 +177,7 @@ int xc_domain_create(int xc_handle,
 int xc_domain_create(int xc_handle,
                      uint32_t ssidref,
                      xen_domain_handle_t handle,
+                     uint32_t flags,
                      uint32_t *pdomid);
 
 
diff -r f3f492ba8e8d -r 7b5115221dfc tools/python/xen/lowlevel/xc/xc.c
--- a/tools/python/xen/lowlevel/xc/xc.c Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/python/xen/lowlevel/xc/xc.c Wed Nov 01 16:08:19 2006 +0000
@@ -65,18 +65,17 @@ static PyObject *pyxc_domain_create(XcOb
                                     PyObject *args,
                                     PyObject *kwds)
 {
-    uint32_t dom = 0;
-    int      ret, i;
-    uint32_t ssidref = 0;
+    uint32_t dom = 0, ssidref = 0, flags = 0;
+    int      ret, i, hvm = 0;
     PyObject *pyhandle = NULL;
     xen_domain_handle_t handle = { 
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef,
         0xde, 0xad, 0xbe, 0xef, 0xde, 0xad, 0xbe, 0xef };
 
-    static char *kwd_list[] = { "domid", "ssidref", "handle", NULL };
-
-    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiO", kwd_list,
-                                      &dom, &ssidref, &pyhandle))
+    static char *kwd_list[] = { "domid", "ssidref", "handle", "hvm", NULL };
+
+    if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iiOi", kwd_list,
+                                      &dom, &ssidref, &pyhandle, &hvm))
         return NULL;
 
     if ( pyhandle != NULL )
@@ -94,7 +93,11 @@ static PyObject *pyxc_domain_create(XcOb
         }
     }
 
-    if ( (ret = xc_domain_create(self->xc_handle, ssidref, handle, &dom)) < 0 )
+    if ( hvm )
+        flags |= XEN_DOMCTL_CDF_hvm_guest;
+
+    if ( (ret = xc_domain_create(self->xc_handle, ssidref,
+                                 handle, flags, &dom)) < 0 )
         return PyErr_SetFromErrno(xc_error);
 
     return PyInt_FromLong(dom);
diff -r f3f492ba8e8d -r 7b5115221dfc tools/python/xen/xend/XendDomainInfo.py
--- a/tools/python/xen/xend/XendDomainInfo.py   Wed Nov 01 15:56:55 2006 +0000
+++ b/tools/python/xen/xend/XendDomainInfo.py   Wed Nov 01 16:08:19 2006 +0000
@@ -1198,10 +1198,20 @@ class XendDomainInfo:
 
         log.debug('XendDomainInfo.constructDomain')
 
+        hvm = (self._infoIsSet('image') and
+               sxp.name(self.info['image']) == "hvm")
+        if hvm:
+            info = xc.xeninfo()
+            if not 'hvm' in info['xen_caps']:
+                raise VmError("HVM guest support is unavailable: is VT/AMD-V "
+                              "supported by your CPU and enabled in your "
+                              "BIOS?")
+
         self.domid = xc.domain_create(
             domid = 0,
             ssidref = security.get_security_info(self.info, 'ssidref'),
-            handle = uuid.fromString(self.info['uuid']))
+            handle = uuid.fromString(self.info['uuid']),
+            hvm = int(hvm))
 
         if self.domid < 0:
             raise VmError('Creating domain failed: name=%s' %
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/ia64/xen/xensetup.c
--- a/xen/arch/ia64/xen/xensetup.c      Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/ia64/xen/xensetup.c      Wed Nov 01 16:08:19 2006 +0000
@@ -422,7 +422,7 @@ void start_kernel(void)
 
     scheduler_init();
     idle_vcpu[0] = (struct vcpu*) ia64_r13;
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
         BUG();
 
@@ -502,11 +502,11 @@ printk("num_online_cpus=%d, max_cpus=%d\
     expose_p2m_init();
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0);
+    dom0 = domain_create(0, 0);
     if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
         panic("Error creating domain 0\n");
 
-    set_bit(_DOMF_privileged, &dom0->domain_flags);
+    dom0->is_privileged = 1;
 
     /*
      * We're going to setup domain0 using the module(s) that we stashed safely
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/powerpc/mm.c
--- a/xen/arch/powerpc/mm.c     Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/powerpc/mm.c     Wed Nov 01 16:08:19 2006 +0000
@@ -316,8 +316,7 @@ ulong pfn2mfn(struct domain *d, ulong pf
     int t = PFN_TYPE_NONE;
 
     /* quick tests first */
-    if (test_bit(_DOMF_privileged, &d->domain_flags) &&
-        cpu_io_mfn(pfn)) {
+    if (d->is_privileged && cpu_io_mfn(pfn)) {
         t = PFN_TYPE_IO;
         mfn = pfn;
     } else {
@@ -341,8 +340,7 @@ ulong pfn2mfn(struct domain *d, ulong pf
     if (t == PFN_TYPE_NONE) {
         /* This hack allows dom0 to map all memory, necessary to
          * initialize domU state. */
-        if (test_bit(_DOMF_privileged, &d->domain_flags) &&
-            mfn_valid(pfn)) {
+        if (d->is_privileged && mfn_valid(pfn)) {
             struct page_info *pg;
 
             /* page better be allocated to some domain but not the caller */
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/powerpc/papr/xlate.c
--- a/xen/arch/powerpc/papr/xlate.c     Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/powerpc/papr/xlate.c     Wed Nov 01 16:08:19 2006 +0000
@@ -174,7 +174,7 @@ static void h_enter(struct cpu_user_regs
 
     if (mtype == PFN_TYPE_IO) {
         /* only a privilaged dom can access outside IO space */
-        if ( !test_bit(_DOMF_privileged, &d->domain_flags) ) {
+        if ( !d->is_privileged ) {
             regs->gprs[3] =  H_Privilege;
             printk("%s: unprivileged access to physical page: 0x%lx\n",
                    __func__, pfn);
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/powerpc/setup.c
--- a/xen/arch/powerpc/setup.c  Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/powerpc/setup.c  Wed Nov 01 16:08:19 2006 +0000
@@ -157,7 +157,7 @@ static void __init start_of_day(void)
     scheduler_init();
 
     /* create idle domain */
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     if ((idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL))
         BUG();
     set_current(idle_domain->vcpu[0]);
@@ -342,7 +342,7 @@ static void __init __start_xen(multiboot
     start_of_day();
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0);
+    dom0 = domain_create(0, 0);
     if (dom0 == NULL)
         panic("Error creating domain 0\n");
     dom0->max_pages = ~0U;
@@ -355,8 +355,9 @@ static void __init __start_xen(multiboot
      * need to make sure Dom0's vVCPU 0 is pinned to the CPU */
     dom0->vcpu[0]->cpu_affinity = cpumask_of_cpu(0);
 
-    set_bit(_DOMF_privileged, &dom0->domain_flags);
-    /* post-create hooks sets security label */
+    dom0->is_privileged = 1;
+
+    /* Post-create hook sets security label. */
     acm_post_domain0_create(dom0->domain_id);
 
     cmdline = (char *)(mod[0].string ? __va((ulong)mod[0].string) : NULL);
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/domain.c
--- a/xen/arch/x86/domain.c     Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/domain.c     Wed Nov 01 16:08:19 2006 +0000
@@ -157,6 +157,13 @@ int arch_domain_create(struct domain *d)
     int vcpuid, pdpt_order;
     int i;
 
+    if ( is_hvm_domain(d) && !hvm_enabled )
+    {
+        gdprintk(XENLOG_WARNING, "Attempt to create a HVM guest "
+                 "on a non-VT/AMDV platform.\n");
+        return -EINVAL;
+    }
+
     pdpt_order = get_order_from_bytes(PDPT_L1_ENTRIES * sizeof(l1_pgentry_t));
     d->arch.mm_perdomain_pt = alloc_xenheap_pages(pdpt_order);
     if ( d->arch.mm_perdomain_pt == NULL )
@@ -258,7 +265,11 @@ int arch_set_info_guest(
     unsigned long cr3_pfn = INVALID_MFN;
     int i, rc;
 
-    if ( !(c->flags & VGCF_HVM_GUEST) )
+    if ( !!(c->flags & VGCF_hvm_guest) != is_hvm_vcpu(v) )
+        return -EINVAL;
+    c->flags &= ~VGCF_hvm_guest;
+
+    if ( !is_hvm_vcpu(v) )
     {
         fixup_guest_stack_selector(c->user_regs.ss);
         fixup_guest_stack_selector(c->kernel_ss);
@@ -272,15 +283,13 @@ int arch_set_info_guest(
         for ( i = 0; i < 256; i++ )
             fixup_guest_code_selector(c->trap_ctxt[i].cs);
     }
-    else if ( !hvm_enabled )
-      return -EINVAL;
 
     clear_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
-    if ( c->flags & VGCF_I387_VALID )
+    if ( c->flags & VGCF_i387_valid )
         set_bit(_VCPUF_fpu_initialised, &v->vcpu_flags);
 
     v->arch.flags &= ~TF_kernel_mode;
-    if ( (c->flags & VGCF_IN_KERNEL) || (c->flags & VGCF_HVM_GUEST) )
+    if ( (c->flags & VGCF_in_kernel) || is_hvm_vcpu(v)/*???*/ )
         v->arch.flags |= TF_kernel_mode;
 
     memcpy(&v->arch.guest_context, c, sizeof(*c));
@@ -291,7 +300,7 @@ int arch_set_info_guest(
 
     init_int80_direct_trap(v);
 
-    if ( !(c->flags & VGCF_HVM_GUEST) )
+    if ( !is_hvm_vcpu(v) )
     {
         /* IOPL privileges are virtualised. */
         v->arch.iopl = (v->arch.guest_context.user_regs.eflags >> 12) & 3;
@@ -316,7 +325,7 @@ int arch_set_info_guest(
     if ( v->vcpu_id == 0 )
         d->vm_assist = c->vm_assist;
 
-    if ( !(c->flags & VGCF_HVM_GUEST) )
+    if ( !is_hvm_vcpu(v) )
     {
         cr3_pfn = gmfn_to_mfn(d, xen_cr3_to_pfn(c->ctrlreg[3]));
         v->arch.guest_table = pagetable_from_pfn(cr3_pfn);
@@ -325,7 +334,7 @@ int arch_set_info_guest(
     if ( (rc = (int)set_gdt(v, c->gdt_frames, c->gdt_ents)) != 0 )
         return rc;
 
-    if ( c->flags & VGCF_HVM_GUEST )
+    if ( is_hvm_vcpu(v) )
     {
         v->arch.guest_table = pagetable_null();
 
@@ -745,7 +754,7 @@ void context_switch(struct vcpu *prev, s
         /* Re-enable interrupts before restoring state which may fault. */
         local_irq_enable();
 
-        if ( !hvm_guest(next) )
+        if ( !is_hvm_vcpu(next) )
         {
             load_LDT(next);
             load_segments(next);
@@ -835,7 +844,7 @@ unsigned long hypercall_create_continuat
 #if defined(__i386__)
         regs->eax  = op;
 
-        if ( supervisor_mode_kernel || hvm_guest(current) )
+        if ( supervisor_mode_kernel || is_hvm_vcpu(current) )
             regs->eip &= ~31; /* re-execute entire hypercall entry stub */
         else
             regs->eip -= 2;   /* re-execute 'int 0x82' */
@@ -972,7 +981,7 @@ void domain_relinquish_resources(struct 
 #endif
     }
 
-    if ( d->vcpu[0] && hvm_guest(d->vcpu[0]) )
+    if ( is_hvm_domain(d) )
         hvm_relinquish_guest_resources(d);
 
     /* Tear down shadow mode stuff. */
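
The arch_set_info_guest() hunk above also adds a consistency rule: the VGCF_hvm_guest bit in the supplied vcpu context must agree with the HVM-ness the domain was created with, otherwise the call fails. A reduced, standalone model of that check (the bit position is assumed for illustration; names mirror the patch):

    #include <errno.h>
    #include <stdio.h>

    #define VGCF_hvm_guest (1U<<1)   /* assumed bit position, illustration only */

    static int check_context_flags(unsigned int ctxt_flags, int vcpu_is_hvm)
    {
        /* Context says HVM but the domain is PV (or vice versa): reject. */
        if ( !!(ctxt_flags & VGCF_hvm_guest) != !!vcpu_is_hvm )
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_context_flags(VGCF_hvm_guest, 1)); /* 0 */
        printf("%d\n", check_context_flags(0, 1));              /* negative: -EINVAL */
        return 0;
    }
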
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/domctl.c     Wed Nov 01 16:08:19 2006 +0000
@@ -224,7 +224,7 @@ long arch_do_domctl(
 
             spin_lock(&d->page_alloc_lock);
 
-            if ( hvm_guest(d->vcpu[0]) && shadow_mode_translate(d) )
+            if ( is_hvm_domain(d) && shadow_mode_translate(d) )
             {
                 /* HVM domain: scan P2M to get guaranteed physmap order. */
                 for ( i = 0, gmfn = 0;
@@ -321,7 +321,7 @@ void arch_getdomaininfo_ctxt(
 {
     memcpy(c, &v->arch.guest_context, sizeof(*c));
 
-    if ( hvm_guest(v) )
+    if ( is_hvm_vcpu(v) )
     {
         hvm_store_cpu_guest_regs(v, &c->user_regs, c->ctrlreg);
     }
@@ -334,11 +334,11 @@ void arch_getdomaininfo_ctxt(
 
     c->flags = 0;
     if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
-        c->flags |= VGCF_I387_VALID;
+        c->flags |= VGCF_i387_valid;
     if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
-        c->flags |= VGCF_IN_KERNEL;
-    if ( hvm_guest(v) )
-        c->flags |= VGCF_HVM_GUEST;
+        c->flags |= VGCF_in_kernel;
+    if ( is_hvm_vcpu(v) )
+        c->flags |= VGCF_hvm_guest;
 
     c->ctrlreg[3] = xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table));
 
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/hvm.c    Wed Nov 01 16:08:19 2006 +0000
@@ -260,12 +260,12 @@ void hvm_release_assist_channel(struct v
 }
 
 
-void hvm_setup_platform(struct domain* d)
+void hvm_setup_platform(struct domain *d)
 {
     struct hvm_domain *platform;
-    struct vcpu *v=current;
-
-    if ( !hvm_guest(v) || (v->vcpu_id != 0) )
+    struct vcpu *v = current;
+
+    if ( !is_hvm_domain(d) || (v->vcpu_id != 0) )
         return;
 
     hvm_zap_iommu_pages(d);
@@ -635,7 +635,7 @@ int hvm_bringup_ap(int vcpuid, int tramp
     struct vcpu_guest_context *ctxt;
     int rc = 0;
 
-    BUG_ON(!hvm_guest(bsp));
+    BUG_ON(!is_hvm_domain(d));
 
     if ( bsp->vcpu_id != 0 )
     {
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/hvm/svm/svm.c
--- a/xen/arch/x86/hvm/svm/svm.c        Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c        Wed Nov 01 16:08:19 2006 +0000
@@ -562,7 +562,6 @@ static void svm_init_ap_context(struct v
      */
     ctxt->user_regs.eip = 0x0;
     ctxt->user_regs.cs = (trampoline_vector << 8);
-    ctxt->flags = VGCF_HVM_GUEST;
 }
 
 static void svm_init_hypercall_page(struct domain *d, void *hypercall_page)
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/hvm/svm/vmcb.c
--- a/xen/arch/x86/hvm/svm/vmcb.c       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/svm/vmcb.c       Wed Nov 01 16:08:19 2006 +0000
@@ -482,20 +482,14 @@ static void vmcb_dump(unsigned char ch)
     struct vcpu *v;
     
     printk("*********** VMCB Areas **************\n");
-    for_each_domain(d) {
+    for_each_domain ( d )
+    {
+        if ( !is_hvm_domain(d) )
+            continue;
         printk("\n>>> Domain %d <<<\n", d->domain_id);
-        for_each_vcpu(d, v) {
-
-            /* 
-             * Presumably, if a domain is not an HVM guest,
-             * the very first CPU will not pass this test
-             */
-            if (!hvm_guest(v)) {
-                printk("\t\tNot HVM guest\n");
-                break;
-            }
+        for_each_vcpu ( d, v )
+        {
             printk("\tVCPU %d\n", v->vcpu_id);
-
             svm_dump_vmcb("key_handler", v->arch.hvm_svm.vmcb);
         }
     }
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/hvm/vmx/vmcs.c
--- a/xen/arch/x86/hvm/vmx/vmcs.c       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmcs.c       Wed Nov 01 16:08:19 2006 +0000
@@ -218,7 +218,7 @@ void vmx_vmcs_exit(struct vcpu *v)
 
     /* Don't confuse arch_vmx_do_resume (for @v or @current!) */
     vmx_clear_vmcs(v);
-    if ( hvm_guest(current) )
+    if ( is_hvm_vcpu(current) )
         vmx_load_vmcs(current);
 
     spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
@@ -709,20 +709,14 @@ static void vmcs_dump(unsigned char ch)
     struct vcpu *v;
     
     printk("*********** VMCS Areas **************\n");
-    for_each_domain(d) {
+    for_each_domain ( d )
+    {
+        if ( !is_hvm_domain(d) )
+            continue;
         printk("\n>>> Domain %d <<<\n", d->domain_id);
-        for_each_vcpu(d, v) {
-
-            /* 
-             * Presumably, if a domain is not an HVM guest,
-             * the very first CPU will not pass this test
-             */
-            if (!hvm_guest(v)) {
-                printk("\t\tNot HVM guest\n");
-                break;
-            }
+        for_each_vcpu ( d, v )
+        {
             printk("\tVCPU %d\n", v->vcpu_id);
-
             vmx_vmcs_enter(v);
             vmcs_dump_vcpu();
             vmx_vmcs_exit(v);
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c        Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c        Wed Nov 01 16:08:19 2006 +0000
@@ -671,8 +671,6 @@ static void vmx_init_ap_context(struct v
     ctxt->user_regs.eip = VMXASSIST_BASE;
     ctxt->user_regs.edx = vcpuid;
     ctxt->user_regs.ebx = trampoline_vector;
-
-    ctxt->flags = VGCF_HVM_GUEST;
 
     /* Virtual IDT is empty at start-of-day. */
     for ( i = 0; i < 256; i++ )
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm.c Wed Nov 01 16:08:19 2006 +0000
@@ -1715,7 +1715,7 @@ int new_guest_cr3(unsigned long mfn)
     int okay;
     unsigned long old_base_mfn;
 
-    if ( hvm_guest(v) && !hvm_paging_enabled(v) )
+    if ( is_hvm_domain(d) && !hvm_paging_enabled(v) )
         domain_crash_synchronous();
 
     if ( shadow_mode_refcounts(d) )
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Nov 01 16:08:19 2006 +0000
@@ -2286,7 +2286,7 @@ void sh_update_paging_modes(struct vcpu 
     //
     shadow_detach_old_tables(v);
 
-    if ( !hvm_guest(v) )
+    if ( !is_hvm_domain(d) )
     {
         ///
         /// PV guest
@@ -2394,7 +2394,7 @@ void sh_update_paging_modes(struct vcpu 
             SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
                           "(was g=%u s=%u)\n",
                           d->domain_id, v->vcpu_id,
-                          hvm_guest(v) ? !!hvm_paging_enabled(v) : 1,
+                          is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
                           v->arch.shadow.mode->guest_levels,
                           v->arch.shadow.mode->shadow_levels,
                           old_mode ? old_mode->guest_levels : 0,
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c    Wed Nov 01 16:08:19 2006 +0000
@@ -202,14 +202,14 @@ guest_supports_superpages(struct vcpu *v
 {
     /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
      * CR4.PSE is set or the guest is in PAE or long mode */
-    return (hvm_guest(v) && (GUEST_PAGING_LEVELS != 2 
+    return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2 
                              || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
 }
 
 static inline int
 guest_supports_nx(struct vcpu *v)
 {
-    if ( !hvm_guest(v) )
+    if ( !is_hvm_vcpu(v) )
         return cpu_has_nx;
 
     // XXX - fix this!
@@ -769,7 +769,7 @@ _sh_propagate(struct vcpu *v,
     // PV guests in 64-bit mode use two different page tables for user vs
     // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
     // It is always shadowed as present...
-    if ( (GUEST_PAGING_LEVELS == 4) && !hvm_guest(v) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_domain(d) )
     {
         sflags |= _PAGE_USER;
     }
@@ -2293,7 +2293,7 @@ static int validate_gl1e(struct vcpu *v,
     gfn = guest_l1e_get_gfn(*new_gl1e);
     gmfn = vcpu_gfn_to_mfn(v, gfn);
 
-    mmio = (hvm_guest(v) && shadow_vcpu_mode_translate(v) && !valid_mfn(gmfn));
+    mmio = (is_hvm_vcpu(v) && shadow_vcpu_mode_translate(v) && !valid_mfn(gmfn));
     l1e_propagate_from_guest(v, new_gl1e, _mfn(INVALID_MFN), gmfn, &new_sl1e, 
                              ft_prefetch, mmio);
     
@@ -2523,7 +2523,7 @@ static void sh_prefetch(struct vcpu *v, 
         /* Look at the gfn that the l1e is pointing at */
         gfn = guest_l1e_get_gfn(gl1e);
         gmfn = vcpu_gfn_to_mfn(v, gfn);
-        mmio = ( hvm_guest(v) 
+        mmio = ( is_hvm_vcpu(v) 
                  && shadow_vcpu_mode_translate(v) 
                  && mmio_space(gfn_to_paddr(gfn)) );
 
@@ -2585,7 +2585,8 @@ static int sh_page_fault(struct vcpu *v,
         {
             if ( sh_l1e_is_gnp(sl1e) )
             {
-                if ( likely(!hvm_guest(v) || shadow_vcpu_mode_translate(v)) )
+                if ( likely(!is_hvm_domain(d) ||
+                            shadow_vcpu_mode_translate(v)) )
                 { 
                     /* Not-present in a guest PT: pass to the guest as
                      * a not-present fault (by flipping two bits). */
@@ -2647,7 +2648,7 @@ static int sh_page_fault(struct vcpu *v,
     //
     if ( unlikely(!(guest_l1e_get_flags(gw.eff_l1e) & _PAGE_PRESENT)) )
     {
-        if ( hvm_guest(v) && !shadow_vcpu_mode_translate(v) )
+        if ( is_hvm_domain(d) && !shadow_vcpu_mode_translate(v) )
         {
             /* Not present in p2m map, means this is mmio */
             gpa = va;
@@ -2704,9 +2705,9 @@ static int sh_page_fault(struct vcpu *v,
     /* What mfn is the guest trying to access? */
     gfn = guest_l1e_get_gfn(gw.eff_l1e);
     gmfn = vcpu_gfn_to_mfn(v, gfn);
-    mmio = ( hvm_guest(v) 
-             && shadow_vcpu_mode_translate(v) 
-             && mmio_space(gfn_to_paddr(gfn)) );
+    mmio = (is_hvm_domain(d)
+            && shadow_vcpu_mode_translate(v) 
+            && mmio_space(gfn_to_paddr(gfn)));
 
     if ( !mmio && !valid_mfn(gmfn) )
     {
@@ -2775,14 +2776,15 @@ static int sh_page_fault(struct vcpu *v,
  emulate:
     /* Take the register set we were called with */
     emul_regs = *regs;
-    if ( hvm_guest(v) )
+    if ( is_hvm_domain(d) )
     {
         /* Add the guest's segment selectors, rip, rsp. rflags */ 
         hvm_store_cpu_guest_regs(v, &emul_regs, NULL);
     }
     emul_ctxt.regs = &emul_regs;
     emul_ctxt.cr2 = va;
-    emul_ctxt.mode = hvm_guest(v) ? hvm_guest_x86_mode(v) : X86EMUL_MODE_HOST;
+    emul_ctxt.mode = (is_hvm_domain(d) ?
+                      hvm_guest_x86_mode(v) : X86EMUL_MODE_HOST);
 
     SHADOW_PRINTK("emulate: eip=%#lx\n", emul_regs.eip);
 
@@ -2813,7 +2815,7 @@ static int sh_page_fault(struct vcpu *v,
         goto not_a_shadow_fault;
 
     /* Emulator has changed the user registers: write back */
-    if ( hvm_guest(v) )
+    if ( is_hvm_domain(d) )
     {
         /* Write back the guest's segment selectors, rip, rsp. rflags */ 
         hvm_load_cpu_guest_regs(v, &emul_regs);
@@ -3317,7 +3319,7 @@ sh_update_cr3(struct vcpu *v)
     
 #ifndef NDEBUG 
     /* Double-check that the HVM code has sent us a sane guest_table */
-    if ( hvm_guest(v) )
+    if ( is_hvm_domain(d) )
     {
         gfn_t gfn;
 
@@ -3492,7 +3494,7 @@ sh_update_cr3(struct vcpu *v)
     ///
     if ( shadow_mode_external(d) )
     {
-        ASSERT(hvm_guest(v));
+        ASSERT(is_hvm_domain(d));
 #if SHADOW_PAGING_LEVELS == 3
         /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
         v->arch.hvm_vcpu.hw_cr3 = virt_to_maddr(&v->arch.shadow.l3table);
@@ -3890,7 +3892,7 @@ static char * sh_audit_flags(struct vcpu
 {
     if ( (sflags & _PAGE_PRESENT) && !(gflags & _PAGE_PRESENT) )
         return "shadow is present but guest is not present";
-    if ( (sflags & _PAGE_GLOBAL) && !hvm_guest(v) ) 
+    if ( (sflags & _PAGE_GLOBAL) && !is_hvm_vcpu(v) ) 
         return "global bit set in PV shadow";
     if ( (level == 1 || (level == 2 && (gflags & _PAGE_PSE)))
          && ((sflags & _PAGE_DIRTY) && !(gflags & _PAGE_DIRTY)) ) 
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/mm/shadow/types.h
--- a/xen/arch/x86/mm/shadow/types.h    Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/mm/shadow/types.h    Wed Nov 01 16:08:19 2006 +0000
@@ -205,13 +205,13 @@ static inline shadow_l4e_t shadow_l4e_fr
     __sh_linear_l1_table; \
 })
 
-// XXX -- these should not be conditional on hvm_guest(v), but rather on
+// XXX -- these should not be conditional on is_hvm_vcpu(v), but rather on
 //        shadow_mode_external(d)...
 //
 #define sh_linear_l2_table(v) ({ \
     ASSERT(current == (v)); \
     ((shadow_l2e_t *) \
-     (hvm_guest(v) ? __linear_l1_table : __sh_linear_l1_table) + \
+     (is_hvm_vcpu(v) ? __linear_l1_table : __sh_linear_l1_table) + \
      shadow_l1_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 
@@ -219,7 +219,7 @@ static inline shadow_l4e_t shadow_l4e_fr
 #define sh_linear_l3_table(v) ({ \
     ASSERT(current == (v)); \
     ((shadow_l3e_t *) \
-     (hvm_guest(v) ? __linear_l2_table : __sh_linear_l2_table) + \
+     (is_hvm_vcpu(v) ? __linear_l2_table : __sh_linear_l2_table) + \
       shadow_l2_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 
@@ -228,7 +228,7 @@ static inline shadow_l4e_t shadow_l4e_fr
 #define sh_linear_l4_table(v) ({ \
     ASSERT(current == (v)); \
     ((l4_pgentry_t *) \
-     (hvm_guest(v) ? __linear_l3_table : __sh_linear_l3_table) + \
+     (is_hvm_vcpu(v) ? __linear_l3_table : __sh_linear_l3_table) + \
       shadow_l3_linear_offset(SH_LINEAR_PT_VIRT_START)); \
 })
 #endif
@@ -585,7 +585,7 @@ accumulate_guest_flags(struct vcpu *v, w
     // In 64-bit PV guests, the _PAGE_USER bit is implied in all guest
     // entries (since even the guest kernel runs in ring 3).
     //
-    if ( (GUEST_PAGING_LEVELS == 4) && !hvm_guest(v) )
+    if ( (GUEST_PAGING_LEVELS == 4) && !is_hvm_vcpu(v) )
         accumulated_flags |= _PAGE_USER;
 
     return accumulated_flags;
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/oprofile/xenoprof.c
--- a/xen/arch/x86/oprofile/xenoprof.c  Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/oprofile/xenoprof.c  Wed Nov 01 16:08:19 2006 +0000
@@ -701,7 +701,7 @@ int xenoprofile_get_mode(struct vcpu *v,
     if ( !guest_mode(regs) )
         return 2;
 
-    if ( hvm_guest(v) )
+    if ( is_hvm_vcpu(v) )
         return ((regs->cs & 3) != 3);
 
     return guest_kernel_mode(v, regs);  
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/setup.c
--- a/xen/arch/x86/setup.c      Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/setup.c      Wed Nov 01 16:08:19 2006 +0000
@@ -249,7 +249,7 @@ static void __init init_idle_domain(void
     /* Domain creation requires that scheduler structures are initialised. */
     scheduler_init();
 
-    idle_domain = domain_create(IDLE_DOMAIN_ID);
+    idle_domain = domain_create(IDLE_DOMAIN_ID, 0);
     if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
         BUG();
 
@@ -640,12 +640,13 @@ void __init __start_xen(multiboot_info_t
     acm_init(_policy_start, _policy_len);
 
     /* Create initial domain 0. */
-    dom0 = domain_create(0);
+    dom0 = domain_create(0, 0);
     if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
         panic("Error creating domain 0\n");
 
-    set_bit(_DOMF_privileged, &dom0->domain_flags);
-    /* post-create hooks sets security label */
+    dom0->is_privileged = 1;
+
+    /* Post-create hook sets security label. */
     acm_post_domain0_create(dom0->domain_id);
 
     /* Grab the DOM0 command line. */
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/traps.c      Wed Nov 01 16:08:19 2006 +0000
@@ -134,7 +134,7 @@ static void show_guest_stack(struct cpu_
     int i;
     unsigned long *stack, addr;
 
-    if ( hvm_guest(current) )
+    if ( is_hvm_vcpu(current) )
         return;
 
     if ( vm86_mode(regs) )
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/x86_32/domain_page.c
--- a/xen/arch/x86/x86_32/domain_page.c Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/x86_32/domain_page.c Wed Nov 01 16:08:19 2006 +0000
@@ -29,7 +29,7 @@ static inline struct vcpu *mapcache_curr
      * then it means we are running on the idle domain's page table and must
      * therefore use its mapcache.
      */
-    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !hvm_guest(v) )
+    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !is_hvm_vcpu(v) )
     {
         /* If we really are idling, perform lazy context switch now. */
         if ( (v = idle_vcpu[smp_processor_id()]) == current )
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/x86_32/traps.c
--- a/xen/arch/x86/x86_32/traps.c       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/x86_32/traps.c       Wed Nov 01 16:08:19 2006 +0000
@@ -45,7 +45,7 @@ void show_registers(struct cpu_user_regs
     unsigned long fault_crs[8];
     const char *context;
 
-    if ( hvm_guest(current) && guest_mode(regs) )
+    if ( is_hvm_vcpu(current) && guest_mode(regs) )
     {
         context = "hvm";
         hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
@@ -515,7 +515,7 @@ static void hypercall_page_initialise_ri
 
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
-    if ( hvm_guest(d->vcpu[0]) )
+    if ( is_hvm_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
     else if ( supervisor_mode_kernel )
         hypercall_page_initialise_ring0_kernel(hypercall_page);
diff -r f3f492ba8e8d -r 7b5115221dfc xen/arch/x86/x86_64/traps.c
--- a/xen/arch/x86/x86_64/traps.c       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/arch/x86/x86_64/traps.c       Wed Nov 01 16:08:19 2006 +0000
@@ -42,7 +42,7 @@ void show_registers(struct cpu_user_regs
     unsigned long fault_crs[8];
     const char *context;
 
-    if ( hvm_guest(current) && guest_mode(regs) )
+    if ( is_hvm_vcpu(current) && guest_mode(regs) )
     {
         context = "hvm";
         hvm_store_cpu_guest_regs(current, &fault_regs, fault_crs);
@@ -229,7 +229,7 @@ unsigned long do_iret(void)
     regs->rsp    = iret_saved.rsp;
     regs->ss     = iret_saved.ss | 3; /* force guest privilege */
 
-    if ( !(iret_saved.flags & VGCF_IN_SYSCALL) )
+    if ( !(iret_saved.flags & VGCF_in_syscall) )
     {
         regs->entry_vector = 0;
         regs->r11 = iret_saved.r11;
@@ -500,7 +500,7 @@ static void hypercall_page_initialise_ri
 
 void hypercall_page_initialise(struct domain *d, void *hypercall_page)
 {
-    if ( hvm_guest(d->vcpu[0]) )
+    if ( is_hvm_domain(d) )
         hvm_hypercall_page_initialise(d, hypercall_page);
     else
         hypercall_page_initialise_ring3_kernel(hypercall_page);
diff -r f3f492ba8e8d -r 7b5115221dfc xen/common/domain.c
--- a/xen/common/domain.c       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/common/domain.c       Wed Nov 01 16:08:19 2006 +0000
@@ -114,7 +114,7 @@ struct vcpu *alloc_idle_vcpu(unsigned in
     unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
 
     d = (vcpu_id == 0) ?
-        domain_create(IDLE_DOMAIN_ID) :
+        domain_create(IDLE_DOMAIN_ID, 0) :
         idle_vcpu[cpu_id - vcpu_id]->domain;
     BUG_ON(d == NULL);
 
@@ -124,12 +124,15 @@ struct vcpu *alloc_idle_vcpu(unsigned in
     return v;
 }
 
-struct domain *domain_create(domid_t domid)
+struct domain *domain_create(domid_t domid, unsigned int domcr_flags)
 {
     struct domain *d, **pd;
 
     if ( (d = alloc_domain(domid)) == NULL )
         return NULL;
+
+    if ( domcr_flags & DOMCRF_hvm )
+        d->is_hvm = 1;
 
     rangeset_domain_initialise(d);
 
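This is the core of the change: domain_create() gains a creation-flags argument, and DOMCRF_hvm sets d->is_hvm exactly once, so the is_hvm_domain()/is_hvm_vcpu() predicates introduced elsewhere in the patch hold for the domain's whole lifetime. A reduced, self-contained model of that pattern (field and macro names mirror the patch; everything else is illustrative):

    #include <stdio.h>

    #define _DOMCRF_hvm 0
    #define DOMCRF_hvm  (1U<<_DOMCRF_hvm)

    struct domain {
        char is_hvm;            /* Boolean: Is this an HVM guest? */
        char is_privileged;     /* Boolean: Is this dom0? */
    };

    #define is_hvm_domain(d) ((d)->is_hvm)

    static struct domain *domain_create(unsigned int domcr_flags)
    {
        static struct domain d;            /* stand-in for alloc_domain() */
        d.is_hvm = !!(domcr_flags & DOMCRF_hvm);
        return &d;
    }

    int main(void)
    {
        struct domain *d = domain_create(DOMCRF_hvm);
        printf("is_hvm_domain: %d\n", is_hvm_domain(d));   /* prints 1 */
        return 0;
    }
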
diff -r f3f492ba8e8d -r 7b5115221dfc xen/common/domctl.c
--- a/xen/common/domctl.c       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/common/domctl.c       Wed Nov 01 16:08:19 2006 +0000
@@ -241,12 +241,10 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
         struct domain *d;
         domid_t        dom;
         static domid_t rover = 0;
-
-        /*
-         * Running the domain 0 kernel in ring 0 is not compatible
-         * with multiple guests.
-         */
-        if ( supervisor_mode_kernel )
+        unsigned int domcr_flags;
+
+        if ( supervisor_mode_kernel ||
+             (op->u.createdomain.flags & ~XEN_DOMCTL_CDF_hvm_guest) )
             return -EINVAL;
 
         dom = op->domain;
@@ -273,8 +271,12 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
             rover = dom;
         }
 
+        domcr_flags = 0;
+        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
+            domcr_flags |= DOMCRF_hvm;
+
         ret = -ENOMEM;
-        if ( (d = domain_create(dom)) == NULL )
+        if ( (d = domain_create(dom, domcr_flags)) == NULL )
             break;
 
         memcpy(d->handle, op->u.createdomain.handle,
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/asm-x86/hvm/support.h
--- a/xen/include/asm-x86/hvm/support.h Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/hvm/support.h Wed Nov 01 16:08:19 2006 +0000
@@ -32,7 +32,8 @@
 #define HVM_DEBUG 1
 #endif
 
-#define hvm_guest(v) ((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
+#define is_hvm_domain(d) ((d)->is_hvm)
+#define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
 
 static inline shared_iopage_t *get_sp(struct domain *d)
 {
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h   Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/processor.h   Wed Nov 01 16:08:19 2006 +0000
@@ -107,7 +107,7 @@
 #define TRAP_deferred_nmi     31
 
 /* Set for entry via SYSCALL. Informs return code to use SYSRETQ not IRETQ. */
-/* NB. Same as VGCF_IN_SYSCALL. No bits in common with any other TRAP_ defn. */
+/* NB. Same as VGCF_in_syscall. No bits in common with any other TRAP_ defn. */
 #define TRAP_syscall         256
 
 /*
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/asm-x86/regs.h
--- a/xen/include/asm-x86/regs.h        Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/regs.h        Wed Nov 01 16:08:19 2006 +0000
@@ -39,7 +39,7 @@ enum EFLAGS {
     /* If a guest frame, it must be have guest privs (unless HVM guest).   */ \
     /* We permit CS==0 which can come from an uninitialised trap entry. */    \
     ASSERT((diff != 0) || vm86_mode(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) ||  \
-           (r->cs == 0) || hvm_guest(current));                              \
+           (r->cs == 0) || is_hvm_vcpu(current));                             \
     /* If not a guest frame, it must be a hypervisor frame. */                \
     ASSERT((diff == 0) || (!vm86_mode(r) && (r->cs == __HYPERVISOR_CS)));     \
     /* Return TRUE if it's a guest frame. */                                  \
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/asm-x86/shadow.h
--- a/xen/include/asm-x86/shadow.h      Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/asm-x86/shadow.h      Wed Nov 01 16:08:19 2006 +0000
@@ -64,7 +64,7 @@
 #define shadow_mode_external(_d)  ((_d)->arch.shadow.mode & SHM2_external)
 
 /* Xen traps & emulates all reads of all page table pages:
- *not yet supported
+ * not yet supported
  */
 #define shadow_mode_trap_reads(_d) ({ (void)(_d); 0; })
 
@@ -77,7 +77,7 @@
 #ifdef __x86_64__
 #define pv_32bit_guest(_v) 0 // not yet supported
 #else
-#define pv_32bit_guest(_v) !hvm_guest(v)
+#define pv_32bit_guest(_v) !is_hvm_vcpu(v)
 #endif
 
 /* The shadow lock.
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/public/arch-x86_64.h
--- a/xen/include/public/arch-x86_64.h  Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/public/arch-x86_64.h  Wed Nov 01 16:08:19 2006 +0000
@@ -152,7 +152,7 @@ typedef unsigned long xen_ulong_t;
  * directly with
  *      orb   $3,1*8(%rsp)
  *      iretq
- * If flags contains VGCF_IN_SYSCALL:
+ * If flags contains VGCF_in_syscall:
  *   Restore RAX, RIP, RFLAGS, RSP.
  *   Discard R11, RCX, CS, SS.
  * Otherwise:
@@ -160,7 +160,9 @@ typedef unsigned long xen_ulong_t;
  * All other registers are saved on hypercall entry and restored to user.
  */
 /* Guest exited in SYSCALL context? Return to guest with SYSRET? */
-#define VGCF_IN_SYSCALL (1<<8)
+#define _VGCF_in_syscall 8
+#define VGCF_in_syscall  (1<<_VGCF_in_syscall)
+#define VGCF_IN_SYSCALL  VGCF_in_syscall
 struct iret_context {
     /* Top of stack (%rsp at point of hypercall). */
     uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/public/domctl.h
--- a/xen/include/public/domctl.h       Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/public/domctl.h       Wed Nov 01 16:08:19 2006 +0000
@@ -16,7 +16,7 @@
 
 #include "xen.h"
 
-#define XEN_DOMCTL_INTERFACE_VERSION 0x00000003
+#define XEN_DOMCTL_INTERFACE_VERSION 0x00000004
 
 struct xenctl_cpumap {
     XEN_GUEST_HANDLE(uint8_t) bitmap;
@@ -32,6 +32,9 @@ struct xen_domctl_createdomain {
     /* IN parameters */
     uint32_t ssidref;
     xen_domain_handle_t handle;
+#define _XEN_DOMCTL_CDF_hvm_guest 0
+#define XEN_DOMCTL_CDF_hvm_guest  (1U<<_XEN_DOMCTL_CDF_hvm_guest)
+    uint32_t flags;
 };
 typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
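
The new flags field is why XEN_DOMCTL_INTERFACE_VERSION is bumped from 0x00000003 to 0x00000004. On the hypervisor side (see the xen/common/domctl.c hunk earlier) unknown bits are rejected with -EINVAL and the accepted XEN_DOMCTL_CDF_hvm_guest bit is translated into the internal DOMCRF_hvm flag. A standalone sketch of that validate-and-translate step, with the constants mirroring the patch and everything else illustrative:

    #include <errno.h>
    #include <stdio.h>

    #define XEN_DOMCTL_CDF_hvm_guest (1U<<0)
    #define DOMCRF_hvm               (1U<<0)

    static int translate_createdomain_flags(unsigned int public_flags,
                                            unsigned int *domcr_flags)
    {
        if ( public_flags & ~XEN_DOMCTL_CDF_hvm_guest )
            return -EINVAL;                  /* unknown bits are rejected */

        *domcr_flags = 0;
        if ( public_flags & XEN_DOMCTL_CDF_hvm_guest )
            *domcr_flags |= DOMCRF_hvm;
        return 0;
    }

    int main(void)
    {
        unsigned int dcf;
        printf("%d\n", translate_createdomain_flags(XEN_DOMCTL_CDF_hvm_guest, &dcf)); /* 0 */
        printf("%d\n", translate_createdomain_flags(1U<<5, &dcf));  /* negative: -EINVAL */
        return 0;
    }
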
diff -r f3f492ba8e8d -r 7b5115221dfc xen/include/xen/sched.h
--- a/xen/include/xen/sched.h   Wed Nov 01 15:56:55 2006 +0000
+++ b/xen/include/xen/sched.h   Wed Nov 01 16:08:19 2006 +0000
@@ -143,6 +143,12 @@ struct domain
     struct rangeset *irq_caps;
 
     unsigned long    domain_flags;
+
+    /* Boolean: Is this an HVM guest? */
+    char             is_hvm;
+
+    /* Boolean: Is this guest fully privileged (aka dom0)? */
+    char             is_privileged;
 
     spinlock_t       pause_lock;
     unsigned int     pause_count;
@@ -237,26 +243,30 @@ static inline void get_knownalive_domain
     ASSERT(!(atomic_read(&d->refcnt) & DOMAIN_DESTROYED));
 }
 
-extern struct domain *domain_create(domid_t domid);
-extern int construct_dom0(
+struct domain *domain_create(domid_t domid, unsigned int domcr_flags);
+ /* DOMCRF_hvm: Create an HVM domain, as opposed to a PV domain. */
+#define _DOMCRF_hvm 0
+#define DOMCRF_hvm  (1U<<_DOMCRF_hvm)
+
+int construct_dom0(
     struct domain *d,
     unsigned long image_start, unsigned long image_len, 
     unsigned long initrd_start, unsigned long initrd_len,
     char *cmdline);
-extern int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
+int set_info_guest(struct domain *d, xen_domctl_vcpucontext_t *);
 
 struct domain *find_domain_by_id(domid_t dom);
-extern void domain_destroy(struct domain *d);
-extern void domain_kill(struct domain *d);
-extern void domain_shutdown(struct domain *d, u8 reason);
-extern void domain_pause_for_debugger(void);
+void domain_destroy(struct domain *d);
+void domain_kill(struct domain *d);
+void domain_shutdown(struct domain *d, u8 reason);
+void domain_pause_for_debugger(void);
 
 /*
  * Mark specified domain as crashed. This function always returns, even if the
  * caller is the specified domain. The domain is not synchronously descheduled
  * from any processor.
  */
-extern void __domain_crash(struct domain *d);
+void __domain_crash(struct domain *d);
 #define domain_crash(d) do {                                              \
     printk("domain_crash called from %s:%d\n", __FILE__, __LINE__);       \
     __domain_crash(d);                                                    \
@@ -266,7 +276,7 @@ extern void __domain_crash(struct domain
  * Mark current domain as crashed and synchronously deschedule from the local
  * processor. This function never returns.
  */
-extern void __domain_crash_synchronous(void) __attribute__((noreturn));
+void __domain_crash_synchronous(void) __attribute__((noreturn));
 #define domain_crash_synchronous() do {                                   \
     printk("domain_crash_sync called from %s:%d\n", __FILE__, __LINE__);  \
     __domain_crash_synchronous();                                         \
@@ -293,7 +303,7 @@ void vcpu_sleep_sync(struct vcpu *d);
  * this call will ensure that all its state is committed to memory and that
  * no CPU is using critical state (e.g., page tables) belonging to the VCPU.
  */
-extern void sync_vcpu_execstate(struct vcpu *v);
+void sync_vcpu_execstate(struct vcpu *v);
 
 /*
  * Called by the scheduler to switch to another VCPU. This function must
@@ -302,7 +312,7 @@ extern void sync_vcpu_execstate(struct v
  * implementing lazy context switching, it suffices to ensure that invoking
  * sync_vcpu_execstate() will switch and commit @prev's state.
  */
-extern void context_switch(
+void context_switch(
     struct vcpu *prev, 
     struct vcpu *next);
 
@@ -312,10 +322,10 @@ extern void context_switch(
  * saved to memory. Alternatively, if implementing lazy context switching,
  * ensure that invoking sync_vcpu_execstate() will switch and commit @prev.
  */
-extern void context_saved(struct vcpu *prev);
+void context_saved(struct vcpu *prev);
 
 /* Called by the scheduler to continue running the current VCPU. */
-extern void continue_running(
+void continue_running(
     struct vcpu *same);
 
 void startup_cpu_idle_loop(void);
@@ -396,26 +406,23 @@ extern struct domain *domain_list;
 /*
  * Per-domain flags (domain_flags).
  */
- /* Is this domain privileged? */
-#define _DOMF_privileged       0
-#define DOMF_privileged        (1UL<<_DOMF_privileged)
  /* Guest shut itself down for some reason. */
-#define _DOMF_shutdown         1
+#define _DOMF_shutdown         0
 #define DOMF_shutdown          (1UL<<_DOMF_shutdown)
  /* Death rattle. */
-#define _DOMF_dying            2
+#define _DOMF_dying            1
 #define DOMF_dying             (1UL<<_DOMF_dying)
  /* Domain is paused by controller software. */
-#define _DOMF_ctrl_pause       3
+#define _DOMF_ctrl_pause       2
 #define DOMF_ctrl_pause        (1UL<<_DOMF_ctrl_pause)
  /* Domain is being debugged by controller software. */
-#define _DOMF_debugging        4
+#define _DOMF_debugging        3
 #define DOMF_debugging         (1UL<<_DOMF_debugging)
  /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
-#define _DOMF_polling          5
+#define _DOMF_polling          4
 #define DOMF_polling           (1UL<<_DOMF_polling)
  /* Domain is paused by the hypervisor? */
-#define _DOMF_paused           6
+#define _DOMF_paused           5
 #define DOMF_paused            (1UL<<_DOMF_paused)
 
 static inline int vcpu_runnable(struct vcpu *v)
@@ -450,8 +457,7 @@ static inline void vcpu_unblock(struct v
         vcpu_wake(v);
 }
 
-#define IS_PRIV(_d)                                         \
-    (test_bit(_DOMF_privileged, &(_d)->domain_flags))
+#define IS_PRIV(_d) ((_d)->is_privileged)
 
 #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
 

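The xen/include/xen/sched.h hunk completes the theme: one-shot properties move out of the domain_flags bitmap into plain boolean fields, IS_PRIV() becomes a simple field read, and the remaining DOMF_* bits are renumbered, which is fine because they are internal to the hypervisor rather than part of any public interface. A tiny illustrative model of the new IS_PRIV() (names mirror the patch; the struct is reduced for the example):

    #include <stdio.h>

    struct domain { char is_privileged; };

    #define IS_PRIV(_d) ((_d)->is_privileged)

    int main(void)
    {
        struct domain dom0 = { 0 }, domU = { 0 };

        dom0.is_privileged = 1;     /* done once when dom0 is constructed */

        printf("dom0: %d, domU: %d\n", IS_PRIV(&dom0), IS_PRIV(&domU)); /* 1, 0 */
        return 0;
    }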