
Re: [Xen-devel] [PATCH v2 1/3] x86: remove PVHv1 code



> -----Original Message-----
> From: Roger Pau Monne [mailto:roger.pau@xxxxxxxxxx]
> Sent: 28 February 2017 17:40
> To: xen-devel@xxxxxxxxxxxxxxxxxxxx
> Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>; Ian Jackson
> <Ian.Jackson@xxxxxxxxxx>; Wei Liu <wei.liu2@xxxxxxxxxx>; Elena Ufimtseva
> <elena.ufimtseva@xxxxxxxxxx>; Jan Beulich <jbeulich@xxxxxxxx>; Andrew
> Cooper <Andrew.Cooper3@xxxxxxxxxx>; Paul Durrant
> <Paul.Durrant@xxxxxxxxxx>; Jun Nakajima <jun.nakajima@xxxxxxxxx>; Kevin
> Tian <kevin.tian@xxxxxxxxx>; George Dunlap <George.Dunlap@xxxxxxxxxx>;
> Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>; Tamas K Lengyel
> <tamas@xxxxxxxxxxxxx>
> Subject: [PATCH v2 1/3] x86: remove PVHv1 code
> 
> This removal applies to both the hypervisor and the toolstack side of PVHv1.
> 
> Note that on the toolstack side there's one hiccup: in xl the "pvh"
> configuration option is translated to builder="hvm",
> device_model_version="none".  This is done because otherwise xl would start
> parsing PV-like options and filling in the PV struct in
> libxl_domain_build_info (which in turn pollutes the HVM one, since the two
> are members of a union).
> 
> Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>

Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
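
For anyone coming at this from the xl side: as the commit message above notes,
the "pvh" shortcut is kept but now simply expands to an HVM container without a
device model. A minimal guest config sketch of what that means in practice (the
kernel path and sizing values below are illustrative placeholders, not taken
from the patch):

    # "pvh = 1" is now shorthand for the two settings it expands to:
    #     builder = "hvm"
    #     device_model_version = "none"
    pvh    = 1
    kernel = "/boot/vmlinuz-guest"   # placeholder path
    memory = 512
    vcpus  = 2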

> ---
> Changes since v1:
>  - Remove dom0pvh option from the command line docs.
>  - Bump domctl interface version due to the removed CDF flag.
> 
> ---
> Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
> Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
> Cc: Elena Ufimtseva <elena.ufimtseva@xxxxxxxxxx>
> Cc: Jan Beulich <jbeulich@xxxxxxxx>
> Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
> Cc: Paul Durrant <paul.durrant@xxxxxxxxxx>
> Cc: Jun Nakajima <jun.nakajima@xxxxxxxxx>
> Cc: Kevin Tian <kevin.tian@xxxxxxxxx>
> Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
> Cc: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
> Cc: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
> ---
>  docs/man/xl.cfg.pod.5.in            |  10 +-
>  docs/misc/pvh-readme.txt            |  63 --------
>  docs/misc/xen-command-line.markdown |   7 -
>  tools/debugger/gdbsx/xg/xg_main.c   |   4 +-
>  tools/libxc/include/xc_dom.h        |   1 -
>  tools/libxc/include/xenctrl.h       |   2 +-
>  tools/libxc/xc_cpuid_x86.c          |  13 +-
>  tools/libxc/xc_dom_core.c           |   9 --
>  tools/libxc/xc_dom_x86.c            |  49 +++---
>  tools/libxc/xc_domain.c             |   1 -
>  tools/libxl/libxl_create.c          |  31 ++--
>  tools/libxl/libxl_dom.c             |   1 -
>  tools/libxl/libxl_internal.h        |   1 -
>  tools/libxl/libxl_x86.c             |   7 +-
>  tools/xl/xl_parse.c                 |  10 +-
>  xen/arch/x86/cpu/vpmu.c             |   3 +-
>  xen/arch/x86/domain.c               |  42 +-----
>  xen/arch/x86/domain_build.c         | 287 +-----------------------------------
>  xen/arch/x86/domctl.c               |   7 +-
>  xen/arch/x86/hvm/hvm.c              |  81 +---------
>  xen/arch/x86/hvm/hypercall.c        |   4 +-
>  xen/arch/x86/hvm/io.c               |   2 -
>  xen/arch/x86/hvm/ioreq.c            |   3 +-
>  xen/arch/x86/hvm/irq.c              |   3 -
>  xen/arch/x86/hvm/vmx/vmcs.c         |  35 +----
>  xen/arch/x86/hvm/vmx/vmx.c          |  12 +-
>  xen/arch/x86/mm.c                   |   2 +-
>  xen/arch/x86/mm/p2m-pt.c            |   2 +-
>  xen/arch/x86/mm/p2m.c               |   6 +-
>  xen/arch/x86/physdev.c              |   8 -
>  xen/arch/x86/setup.c                |   7 -
>  xen/arch/x86/time.c                 |  27 ----
>  xen/common/domain.c                 |   2 -
>  xen/common/domctl.c                 |  10 --
>  xen/common/kernel.c                 |   5 -
>  xen/common/vm_event.c               |   8 +-
>  xen/include/asm-x86/domain.h        |   1 -
>  xen/include/asm-x86/hvm/hvm.h       |   3 -
>  xen/include/public/domctl.h         |  14 +-
>  xen/include/xen/sched.h             |   9 +-
>  40 files changed, 96 insertions(+), 696 deletions(-)
>  delete mode 100644 docs/misc/pvh-readme.txt
> 
> diff --git a/docs/man/xl.cfg.pod.5.in b/docs/man/xl.cfg.pod.5.in
> index 505c111..da1fdd7 100644
> --- a/docs/man/xl.cfg.pod.5.in
> +++ b/docs/man/xl.cfg.pod.5.in
> @@ -1064,6 +1064,12 @@ FIFO-based event channel ABI support up to 131,071 event channels.
>  Other guests are limited to 4095 (64-bit x86 and ARM) or 1023 (32-bit
>  x86).
> 
> +=item B<pvh=BOOLEAN>
> +
> +Selects whether to run this PV guest in an HVM container. Default is 0.
> +Note that this option is equivalent to setting builder="hvm" and
> +device_model_version="none"
> +
>  =back
> 
>  =head2 Paravirtualised (PV) Guest Specific Options
> @@ -1108,10 +1114,6 @@ if your particular guest kernel does not require this behaviour then
>  it is safe to allow this to be enabled but you may wish to disable it
>  anyway.
> 
> -=item B<pvh=BOOLEAN>
> -
> -Selects whether to run this PV guest in an HVM container. Default is 0.
> -
>  =back
> 
>  =head2 Fully-virtualised (HVM) Guest Specific Options
> diff --git a/docs/misc/pvh-readme.txt b/docs/misc/pvh-readme.txt
> deleted file mode 100644
> index c5b3de4..0000000
> --- a/docs/misc/pvh-readme.txt
> +++ /dev/null
> @@ -1,63 +0,0 @@
> -
> -PVH : an x86 PV guest running in an HVM container.
> -
> -See: http://blog.xen.org/index.php/2012/10/23/the-paravirtualization-spectrum-part-1-the-ends-of-the-spectrum/
> -
> -At the moment HAP is required for PVH.
> -
> -At present the only PVH guest is an x86 64bit PV linux. Patches are at:
> -   git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen.git
> -
> -A PVH guest kernel must support following features, as defined for linux
> -in arch/x86/xen/xen-head.S:
> -
> -   #define FEATURES_PVH "|writable_descriptor_tables" \
> -                        "|auto_translated_physmap"    \
> -                        "|supervisor_mode_kernel"     \
> -                        "|hvm_callback_vector"
> -
> -In a nutshell:
> -* the guest uses auto translate:
> - - p2m is managed by xen
> - - pagetables are owned by the guest
> - - mmu_update hypercall not available
> -* it uses event callback and not vlapic emulation,
> -* IDT is native, so set_trap_table hcall is also N/A for a PVH guest.
> -
> -For a full list of hcalls supported for PVH, see pvh_hypercall64_table
> -in arch/x86/hvm/hvm.c in xen.  From the ABI prespective, it's mostly a
> -PV guest with auto translate, although it does use hvm_op for setting
> -callback vector, and has a special version of arch_set_guest_info for bringing
> -up secondary cpus.
> -
> -The initial phase targets the booting of a 64bit UP/SMP linux guest in PVH
> -mode. This is done by adding: pvh=1 in the config file. xl, and not xm, is
> -supported. Phase I patches are broken into three parts:
> -   - xen changes for booting of 64bit PVH guest
> -   - tools changes for creating a PVH guest
> -   - boot of 64bit dom0 in PVH mode.
> -
> -To boot 64bit dom0 in PVH mode, add dom0pvh to grub xen command line.
> -
> -Following fixme's exist in the code:
> -  - arch/x86/time.c: support more tsc modes.
> -
> -Following remain to be done for PVH:
> -   - Get rid of PVH mode, make it just HVM with some flags set
> -   - implement arch_get_info_guest() for pvh.
> -   - Investigate what else needs to be done for VMI support.
> -   - AMD port.
> -   - 32bit PVH guest support in both linux and xen. Xen changes are tagged
> -     "32bitfixme".
> -   - Add support for monitoring guest behavior. See hvm_memory_event* functions
> -     in hvm.c
> -   - vcpu hotplug support
> -   - Live migration of PVH guests.
> -   - Avail PVH dom0 of posted interrupts. (This will be a big win).
> -
> -
> -Note, any emails to me must be cc'd to xen devel mailing list. OTOH, please
> -cc me on PVH emails to the xen devel mailing list.
> -
> -Mukesh Rathor
> -mukesh.rathor [at] oracle [dot] com
> diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
> index 3acbb33..e069594 100644
> --- a/docs/misc/xen-command-line.markdown
> +++ b/docs/misc/xen-command-line.markdown
> @@ -675,13 +675,6 @@ Flag that makes a dom0 boot in PVHv2 mode.
> 
>  Flag that makes a dom0 use shadow paging.
> 
> -### dom0pvh
> -> `= <boolean>`
> -
> -> Default: `false`
> -
> -Flag that makes a 64bit dom0 boot in PVH mode. No 32bit support at present.
> -
>  ### dtuart (ARM)
>  > `= path [:options]`
> 
> diff --git a/tools/debugger/gdbsx/xg/xg_main.c b/tools/debugger/gdbsx/xg/xg_main.c
> index 8c8a402..7ebf914 100644
> --- a/tools/debugger/gdbsx/xg/xg_main.c
> +++ b/tools/debugger/gdbsx/xg/xg_main.c
> @@ -79,7 +79,6 @@ int xgtrc_on = 0;
>  struct xen_domctl domctl;         /* just use a global domctl */
> 
>  static int     _hvm_guest;        /* hvm guest? 32bit HVMs have 64bit context */
> -static int     _pvh_guest;        /* PV guest in HVM container */
>  static domid_t _dom_id;           /* guest domid */
>  static int     _max_vcpu_id;      /* thus max_vcpu_id+1 VCPUs */
>  static int     _dom0_fd;          /* fd of /dev/privcmd */
> @@ -308,7 +307,6 @@ xg_attach(int domid, int guest_bitness)
> 
>      _max_vcpu_id = domctl.u.getdomaininfo.max_vcpu_id;
>      _hvm_guest = (domctl.u.getdomaininfo.flags & XEN_DOMINF_hvm_guest);
> -    _pvh_guest = (domctl.u.getdomaininfo.flags & XEN_DOMINF_pvh_guest);
>      return _max_vcpu_id;
>  }
> 
> @@ -369,7 +367,7 @@ _change_TF(vcpuid_t which_vcpu, int guest_bitness, int setit)
>      int sz = sizeof(anyc);
> 
>      /* first try the MTF for hvm guest. otherwise do manually */
> -    if (_hvm_guest || _pvh_guest) {
> +    if (_hvm_guest) {
>          domctl.u.debug_op.vcpu = which_vcpu;
>          domctl.u.debug_op.op = setit ? XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_ON :
>                                         XEN_DOMCTL_DEBUG_OP_SINGLE_STEP_OFF;
> diff --git a/tools/libxc/include/xc_dom.h b/tools/libxc/include/xc_dom.h
> index 608cbc2..b416eb5 100644
> --- a/tools/libxc/include/xc_dom.h
> +++ b/tools/libxc/include/xc_dom.h
> @@ -164,7 +164,6 @@ struct xc_dom_image {
>      domid_t console_domid;
>      domid_t xenstore_domid;
>      xen_pfn_t shared_info_mfn;
> -    int pvh_enabled;
> 
>      xc_interface *xch;
>      domid_t guest_domid;
> diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
> index a48981a..a7083f8 100644
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -443,7 +443,7 @@ typedef struct xc_dominfo {
>      uint32_t      ssidref;
>      unsigned int  dying:1, crashed:1, shutdown:1,
>                    paused:1, blocked:1, running:1,
> -                  hvm:1, debugged:1, pvh:1, xenstore:1, hap:1;
> +                  hvm:1, debugged:1, xenstore:1, hap:1;
>      unsigned int  shutdown_reason; /* only meaningful if shutdown==1 */
>      unsigned long nr_pages; /* current number, not maximum */
>      unsigned long nr_outstanding_pages;
> diff --git a/tools/libxc/xc_cpuid_x86.c b/tools/libxc/xc_cpuid_x86.c
> index 35ecca1..1bedf05 100644
> --- a/tools/libxc/xc_cpuid_x86.c
> +++ b/tools/libxc/xc_cpuid_x86.c
> @@ -167,7 +167,6 @@ struct cpuid_domain_info
>      } vendor;
> 
>      bool hvm;
> -    bool pvh;
>      uint64_t xfeature_mask;
> 
>      uint32_t *featureset;
> @@ -231,7 +230,6 @@ static int get_cpuid_domain_info(xc_interface *xch, domid_t domid,
>          return -ESRCH;
> 
>      info->hvm = di.hvm;
> -    info->pvh = di.pvh;
> 
>      info->featureset = calloc(host_nr_features, sizeof(*info->featureset));
>      if ( !info->featureset )
> @@ -682,13 +680,10 @@ static void sanitise_featureset(struct cpuid_domain_info *info)
>                  clear_bit(X86_FEATURE_SYSCALL, info->featureset);
>          }
> 
> -        if ( !info->pvh )
> -        {
> -            clear_bit(X86_FEATURE_PSE, info->featureset);
> -            clear_bit(X86_FEATURE_PSE36, info->featureset);
> -            clear_bit(X86_FEATURE_PGE, info->featureset);
> -            clear_bit(X86_FEATURE_PAGE1GB, info->featureset);
> -        }
> +        clear_bit(X86_FEATURE_PSE, info->featureset);
> +        clear_bit(X86_FEATURE_PSE36, info->featureset);
> +        clear_bit(X86_FEATURE_PGE, info->featureset);
> +        clear_bit(X86_FEATURE_PAGE1GB, info->featureset);
>      }
> 
>      if ( info->xfeature_mask == 0 )
> diff --git a/tools/libxc/xc_dom_core.c b/tools/libxc/xc_dom_core.c
> index 36cd3c8..cf40343 100644
> --- a/tools/libxc/xc_dom_core.c
> +++ b/tools/libxc/xc_dom_core.c
> @@ -896,15 +896,6 @@ int xc_dom_parse_image(struct xc_dom_image *dom)
>          goto err;
>      }
> 
> -    if ( dom->pvh_enabled )
> -    {
> -        const char *pvh_features = "writable_descriptor_tables|"
> -                                   "auto_translated_physmap|"
> -                                   "supervisor_mode_kernel|"
> -                                   "hvm_callback_vector";
> -        elf_xen_parse_features(pvh_features, dom->f_requested, NULL);
> -    }
> -
>      /* check features */
>      for ( i = 0; i < XENFEAT_NR_SUBMAPS; i++ )
>      {
> diff --git a/tools/libxc/xc_dom_x86.c b/tools/libxc/xc_dom_x86.c
> index 6495e7f..c176c00 100644
> --- a/tools/libxc/xc_dom_x86.c
> +++ b/tools/libxc/xc_dom_x86.c
> @@ -373,7 +373,7 @@ static x86_pgentry_t get_pg_prot_x86(struct xc_dom_image *dom, int l,
>      unsigned m;
> 
>      prot = domx86->params->lvl_prot[l];
> -    if ( l > 0 || dom->pvh_enabled )
> +    if ( l > 0 )
>          return prot;
> 
>      for ( m = 0; m < domx86->n_mappings; m++ )
> @@ -870,18 +870,15 @@ static int vcpu_x86_32(struct xc_dom_image *dom)
>      DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
>                __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
> 
> -    if ( !dom->pvh_enabled )
> -    {
> -        ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
> -        ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
> -        ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
> -        ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
> -        ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
> -        ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
> -
> -        ctxt->kernel_ss = ctxt->user_regs.ss;
> -        ctxt->kernel_sp = ctxt->user_regs.esp;
> -    }
> +    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_32;
> +    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_32;
> +    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_32;
> +    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_32;
> +    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_32;
> +    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_32;
> +
> +    ctxt->kernel_ss = ctxt->user_regs.ss;
> +    ctxt->kernel_sp = ctxt->user_regs.esp;
> 
>      rc = xc_vcpu_setcontext(dom->xch, dom->guest_domid, 0, &any_ctx);
>      if ( rc != 0 )
> @@ -916,18 +913,15 @@ static int vcpu_x86_64(struct xc_dom_image *dom)
>      DOMPRINTF("%s: cr3: pfn 0x%" PRIpfn " mfn 0x%" PRIpfn "",
>                __FUNCTION__, dom->pgtables_seg.pfn, cr3_pfn);
> 
> -    if ( !dom->pvh_enabled )
> -    {
> -        ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
> -        ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
> -        ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
> -        ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
> -        ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
> -        ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
> -
> -        ctxt->kernel_ss = ctxt->user_regs.ss;
> -        ctxt->kernel_sp = ctxt->user_regs.esp;
> -    }
> +    ctxt->user_regs.ds = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.es = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.fs = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.gs = FLAT_KERNEL_DS_X86_64;
> +    ctxt->user_regs.ss = FLAT_KERNEL_SS_X86_64;
> +    ctxt->user_regs.cs = FLAT_KERNEL_CS_X86_64;
> +
> +    ctxt->kernel_ss = ctxt->user_regs.ss;
> +    ctxt->kernel_sp = ctxt->user_regs.esp;
> 
>      rc = xc_vcpu_setcontext(dom->xch, dom->guest_domid, 0, &any_ctx);
>      if ( rc != 0 )
> @@ -1106,7 +1100,7 @@ static int meminit_pv(struct xc_dom_image *dom)
>      rc = x86_compat(dom->xch, dom->guest_domid, dom->guest_type);
>      if ( rc )
>          return rc;
> -    if ( xc_dom_feature_translated(dom) && !dom->pvh_enabled )
> +    if ( xc_dom_feature_translated(dom) )
>      {
>          dom->shadow_enabled = 1;
>          rc = x86_shadow(dom->xch, dom->guest_domid);
> @@ -1594,9 +1588,6 @@ static int map_grant_table_frames(struct xc_dom_image *dom)
>  {
>      int i, rc;
> 
> -    if ( dom->pvh_enabled )
> -        return 0;
> -
>      for ( i = 0; ; i++ )
>      {
>          rc = xc_domain_add_to_physmap(dom->xch, dom->guest_domid,
> diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
> index d862e53..ea3f193 100644
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -370,7 +370,6 @@ int xc_domain_getinfo(xc_interface *xch,
>          info->running  = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_running);
>          info->hvm      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hvm_guest);
>          info->debugged = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_debugged);
> -        info->pvh      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_pvh_guest);
>          info->xenstore = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_xs_domain);
>          info->hap      = !!(domctl.u.getdomaininfo.flags&XEN_DOMINF_hap);
> 
> diff --git a/tools/libxl/libxl_create.c b/tools/libxl/libxl_create.c
> index e741b9a..c23ba2f 100644
> --- a/tools/libxl/libxl_create.c
> +++ b/tools/libxl/libxl_create.c
> @@ -38,9 +38,6 @@ int libxl__domain_create_info_setdefault(libxl__gc *gc,
>      if (c_info->type == LIBXL_DOMAIN_TYPE_HVM) {
>          libxl_defbool_setdefault(&c_info->hap, true);
>          libxl_defbool_setdefault(&c_info->oos, true);
> -    } else {
> -        libxl_defbool_setdefault(&c_info->pvh, false);
> -        libxl_defbool_setdefault(&c_info->hap, libxl_defbool_val(c_info->pvh));
>      }
> 
>      libxl_defbool_setdefault(&c_info->run_hotplug_scripts, true);
> @@ -475,8 +472,6 @@ int libxl__domain_build(libxl__gc *gc,
> 
>          break;
>      case LIBXL_DOMAIN_TYPE_PV:
> -        state->pvh_enabled = libxl_defbool_val(d_config->c_info.pvh);
> -
>          ret = libxl__build_pv(gc, domid, info, state);
>          if (ret)
>              goto out;
> @@ -536,14 +531,6 @@ int libxl__domain_make(libxl__gc *gc, libxl_domain_config *d_config,
>          flags |= XEN_DOMCTL_CDF_hvm_guest;
>          flags |= libxl_defbool_val(info->hap) ? XEN_DOMCTL_CDF_hap : 0;
>          flags |= libxl_defbool_val(info->oos) ? 0 : XEN_DOMCTL_CDF_oos_off;
> -    } else if (libxl_defbool_val(info->pvh)) {
> -        flags |= XEN_DOMCTL_CDF_pvh_guest;
> -        if (!libxl_defbool_val(info->hap)) {
> -            LOGD(ERROR, *domid, "HAP must be on for PVH");
> -            rc = ERROR_INVAL;
> -            goto out;
> -        }
> -        flags |= XEN_DOMCTL_CDF_hap;
>      }
> 
>      /* Ultimately, handle is an array of 16 uint8_t, same as uuid */
> @@ -859,6 +846,24 @@ static void initiate_domain_create(libxl__egc *egc,
>          goto error_out;
>      }
> 
> +    libxl_defbool_setdefault(&d_config->c_info.pvh, false);
> +    if (libxl_defbool_val(d_config->c_info.pvh)) {
> +        if (d_config->c_info.type != LIBXL_DOMAIN_TYPE_HVM) {
> +            LOGD(ERROR, domid, "Invalid domain type for PVH: %s",
> +                 libxl_domain_type_to_string(d_config->c_info.type));
> +            ret = ERROR_INVAL;
> +            goto error_out;
> +        }
> +        if (d_config->b_info.device_model_version !=
> +            LIBXL_DEVICE_MODEL_VERSION_NONE) {
> +            LOGD(ERROR, domid, "Invalid device model version for PVH: %s",
> +                 libxl_device_model_version_to_string(
> +                     d_config->b_info.device_model_version));
> +            ret = ERROR_INVAL;
> +            goto error_out;
> +        }
> +    }
> +
>      /* If target_memkb is smaller than max_memkb, the subsequent call
>       * to libxc when building HVM domain will enable PoD mode.
>       */
> diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
> index d519c8d..e133962 100644
> --- a/tools/libxl/libxl_dom.c
> +++ b/tools/libxl/libxl_dom.c
> @@ -690,7 +690,6 @@ int libxl__build_pv(libxl__gc *gc, uint32_t domid,
>          return ERROR_FAIL;
>      }
> 
> -    dom->pvh_enabled = state->pvh_enabled;
>      dom->container_type = XC_DOM_PV_CONTAINER;
> 
>      LOG(DEBUG, "pv kernel mapped %d path %s", state->pv_kernel.mapped,
> state->pv_kernel.path);
> diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
> index 5bbede5..7722665 100644
> --- a/tools/libxl/libxl_internal.h
> +++ b/tools/libxl/libxl_internal.h
> @@ -1129,7 +1129,6 @@ typedef struct {
>      libxl__file_reference pv_kernel;
>      libxl__file_reference pv_ramdisk;
>      const char * pv_cmdline;
> -    bool pvh_enabled;
> 
>      xen_vmemrange_t *vmemranges;
>      uint32_t num_vmemranges;
> diff --git a/tools/libxl/libxl_x86.c b/tools/libxl/libxl_x86.c
> index 5da7504..eb6bf66 100644
> --- a/tools/libxl/libxl_x86.c
> +++ b/tools/libxl/libxl_x86.c
> @@ -338,11 +338,8 @@ int libxl__arch_domain_create(libxl__gc *gc, libxl_domain_config *d_config,
>      if (rtc_timeoffset)
>          xc_domain_set_time_offset(ctx->xch, domid, rtc_timeoffset);
> 
> -    if (d_config->b_info.type == LIBXL_DOMAIN_TYPE_HVM ||
> -        libxl_defbool_val(d_config->c_info.pvh)) {
> -
> -        unsigned long shadow;
> -        shadow = (d_config->b_info.shadow_memkb + 1023) / 1024;
> +    if (d_config->b_info.type == LIBXL_DOMAIN_TYPE_HVM) {
> +        unsigned long shadow = (d_config->b_info.shadow_memkb + 1023) / 1024;
>          xc_shadow_control(ctx->xch, domid,
> XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION, NULL, 0, &shadow, 0,
> NULL);
>      }
> 
> diff --git a/tools/xl/xl_parse.c b/tools/xl/xl_parse.c
> index 1ef0c27..bac40b7 100644
> --- a/tools/xl/xl_parse.c
> +++ b/tools/xl/xl_parse.c
> @@ -760,7 +760,15 @@ void parse_config_data(const char *config_source,
>          !strncmp(buf, "hvm", strlen(buf)))
>          c_info->type = LIBXL_DOMAIN_TYPE_HVM;
> 
> -    xlu_cfg_get_defbool(config, "pvh", &c_info->pvh, 0);
> +    if (!xlu_cfg_get_defbool(config, "pvh", &c_info->pvh, 0)) {
> +        /* NB: we need to set the type here, or else we will fall into
> +         * the PV path, and the set of options will be completely wrong
> +         * (even more because the PV and HVM options are inside an union).
> +         */
> +        c_info->type = LIBXL_DOMAIN_TYPE_HVM;
> +        b_info->device_model_version = LIBXL_DEVICE_MODEL_VERSION_NONE;
> +    }
> +
>      xlu_cfg_get_defbool(config, "hap", &c_info->hap, 0);
> 
>      if (xlu_cfg_replace_string (config, "name", &c_info->name, 0)) {
> diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
> index c8615e8..d319dea 100644
> --- a/xen/arch/x86/cpu/vpmu.c
> +++ b/xen/arch/x86/cpu/vpmu.c
> @@ -225,8 +225,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
>          if ( !vpmu->xenpmu_data )
>              return;
> 
> -        if ( is_pvh_vcpu(sampling) &&
> -             !(vpmu_mode & XENPMU_MODE_ALL) &&
> +        if ( !(vpmu_mode & XENPMU_MODE_ALL) &&
>               !vpmu->arch_vpmu_ops->do_interrupt(regs) )
>              return;
> 
> diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
> index 7d3071e..75ded25 100644
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -328,7 +328,7 @@ int switch_compat(struct domain *d)
> 
>      if ( is_hvm_domain(d) || d->tot_pages != 0 )
>          return -EACCES;
> -    if ( is_pv_32bit_domain(d) || is_pvh_32bit_domain(d) )
> +    if ( is_pv_32bit_domain(d) )
>          return 0;
> 
>      d->arch.has_32bit_shinfo = 1;
> @@ -339,12 +339,7 @@ int switch_compat(struct domain *d)
>      {
>          rc = setup_compat_arg_xlat(v);
>          if ( !rc )
> -        {
> -            if ( !is_pvh_domain(d) )
> -                rc = setup_compat_l4(v);
> -            else
> -                rc = hvm_set_mode(v, 4);
> -        }
> +            rc = setup_compat_l4(v);
> 
>          if ( rc )
>              goto undo_and_fail;
> @@ -363,7 +358,7 @@ int switch_compat(struct domain *d)
>      {
>          free_compat_arg_xlat(v);
> 
> -        if ( !is_pvh_domain(d) && !pagetable_is_null(v->arch.guest_table) )
> +        if ( !pagetable_is_null(v->arch.guest_table) )
>              release_compat_l4(v);
>      }
> 
> @@ -873,7 +868,7 @@ int arch_set_info_guest(
> 
>      /* The context is a compat-mode one if the target domain is compat-mode;
>       * we expect the tools to DTRT even in compat-mode callers. */
> -    compat = is_pv_32bit_domain(d) || is_pvh_32bit_domain(d);
> +    compat = is_pv_32bit_domain(d);
> 
>  #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
>      flags = c(flags);
> @@ -925,18 +920,6 @@ int arch_set_info_guest(
>               (c(ldt_ents) > 8192) )
>              return -EINVAL;
>      }
> -    else if ( is_pvh_domain(d) )
> -    {
> -        if ( c(ctrlreg[0]) || c(ctrlreg[1]) || c(ctrlreg[2]) ||
> -             c(ctrlreg[4]) || c(ctrlreg[5]) || c(ctrlreg[6]) ||
> -             c(ctrlreg[7]) ||  c(ldt_base) || c(ldt_ents) ||
> -             c(user_regs.cs) || c(user_regs.ss) || c(user_regs.es) ||
> -             c(user_regs.ds) || c(user_regs.fs) || c(user_regs.gs) ||
> -             c(kernel_ss) || c(kernel_sp) || c(gdt_ents) ||
> -             (!compat && (c.nat->gs_base_kernel ||
> -              c.nat->fs_base || c.nat->gs_base_user)) )
> -            return -EINVAL;
> -    }
> 
>      v->fpu_initialised = !!(flags & VGCF_I387_VALID);
> 
> @@ -992,21 +975,7 @@ int arch_set_info_guest(
>              v->arch.debugreg[i] = c(debugreg[i]);
> 
>          hvm_set_info_guest(v);
> -
> -        if ( is_hvm_domain(d) || v->is_initialised )
> -            goto out;
> -
> -        /* NB: No need to use PV cr3 un-pickling macros */
> -        cr3_gfn = c(ctrlreg[3]) >> PAGE_SHIFT;
> -        cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC);
> -
> -        v->arch.cr3 = page_to_maddr(cr3_page);
> -        v->arch.hvm_vcpu.guest_cr[3] = c(ctrlreg[3]);
> -        v->arch.guest_table = pagetable_from_page(cr3_page);
> -
> -        ASSERT(paging_mode_enabled(d));
> -
> -        goto pvh_skip_pv_stuff;
> +        goto out;
>      }
> 
>      init_int80_direct_trap(v);
> @@ -1259,7 +1228,6 @@ int arch_set_info_guest(
> 
>      clear_bit(_VPF_in_reset, &v->pause_flags);
> 
> - pvh_skip_pv_stuff:
>      if ( v->vcpu_id == 0 )
>          update_domain_wallclock_time(d);
> 
> diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
> index 506780f..88f65cb 100644
> --- a/xen/arch/x86/domain_build.c
> +++ b/xen/arch/x86/domain_build.c
> @@ -471,141 +471,6 @@ static void __init process_dom0_ioports_disable(struct domain *dom0)
>      }
>  }
> 
> -static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
> -                                       unsigned long mfn, unsigned long nr_mfns)
> -{
> -    unsigned long i;
> -    p2m_access_t a;
> -    mfn_t omfn;
> -    p2m_type_t t;
> -    int rc;
> -
> -    for ( i = 0; i < nr_mfns; i++ )
> -    {
> -        if ( !iomem_access_permitted(d, mfn + i, mfn + i) )
> -        {
> -            omfn = get_gfn_query_unlocked(d, gfn + i, &t);
> -            guest_physmap_remove_page(d, _gfn(gfn + i), omfn, PAGE_ORDER_4K);
> -            continue;
> -        }
> -
> -        if ( rangeset_contains_singleton(mmio_ro_ranges, mfn + i) )
> -            a = p2m_access_r;
> -        else
> -            a = p2m_access_rw;
> -
> -        if ( (rc = set_mmio_p2m_entry(d, gfn + i, _mfn(mfn + i),
> -                                      PAGE_ORDER_4K, a)) )
> -            panic("pvh_add_mem_mapping: gfn:%lx mfn:%lx i:%ld rc:%d\n",
> -                  gfn, mfn, i, rc);
> -        if ( !(i & 0xfffff) )
> -                process_pending_softirqs();
> -    }
> -}
> -
> -/*
> - * Set the 1:1 map for all non-RAM regions for dom 0. Thus, dom0 will have
> - * the entire io region mapped in the EPT/NPT.
> - *
> - * pvh fixme: The following doesn't map MMIO ranges when they sit above the
> - *            highest E820 covered address.
> - */
> -static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
> -{
> -    unsigned long start_pfn, end_pfn, end = 0, start = 0;
> -    const struct e820entry *entry;
> -    unsigned long nump, nmap, navail, mfn, nr_holes = 0;
> -    unsigned int i;
> -    struct page_info *page;
> -    int rc;
> -
> -    for ( i = 0, entry = e820.map; i < e820.nr_map; i++, entry++ )
> -    {
> -        end = entry->addr + entry->size;
> -
> -        if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE ||
> -             i == e820.nr_map - 1 )
> -        {
> -            start_pfn = PFN_DOWN(start);
> -
> -            /* Unused RAM areas are marked UNUSABLE, so skip them too */
> -            if ( entry->type == E820_RAM || entry->type == E820_UNUSABLE )
> -                end_pfn = PFN_UP(entry->addr);
> -            else
> -                end_pfn = PFN_UP(end);
> -
> -            if ( start_pfn < end_pfn )
> -            {
> -                nump = end_pfn - start_pfn;
> -                /* Add pages to the mapping */
> -                pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
> -                if ( start_pfn < nr_pages )
> -                    nr_holes += (end_pfn < nr_pages) ?
> -                                    nump : (nr_pages - start_pfn);
> -            }
> -            start = end;
> -        }
> -    }
> -
> -    /*
> -     * Some BIOSes may not report io space above ram that is less than 4GB. So
> -     * we map any non-ram upto 4GB.
> -     */
> -    if ( end < GB(4) )
> -    {
> -        start_pfn = PFN_UP(end);
> -        end_pfn = (GB(4)) >> PAGE_SHIFT;
> -        nump = end_pfn - start_pfn;
> -        pvh_add_mem_mapping(d, start_pfn, start_pfn, nump);
> -    }
> -
> -    /*
> -     * Add the memory removed by the holes at the end of the
> -     * memory map.
> -     */
> -    page = page_list_first(&d->page_list);
> -    for ( i = 0, entry = e820.map; i < e820.nr_map && nr_holes > 0;
> -          i++, entry++ )
> -    {
> -        if ( entry->type != E820_RAM )
> -            continue;
> -
> -        end_pfn = PFN_UP(entry->addr + entry->size);
> -        if ( end_pfn <= nr_pages )
> -            continue;
> -
> -        navail = end_pfn - nr_pages;
> -        nmap = min(navail, nr_holes);
> -        nr_holes -= nmap;
> -        start_pfn = max_t(unsigned long, nr_pages, PFN_DOWN(entry->addr));
> -        /*
> -         * Populate this memory region using the pages
> -         * previously removed by the MMIO holes.
> -         */
> -        do
> -        {
> -            mfn = page_to_mfn(page);
> -            if ( get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY )
> -                continue;
> -
> -            rc = guest_physmap_add_page(d, _gfn(start_pfn), _mfn(mfn), 0);
> -            if ( rc != 0 )
> -                panic("Unable to add gpfn %#lx mfn %#lx to Dom0 physmap: %d",
> -                      start_pfn, mfn, rc);
> -            start_pfn++;
> -            nmap--;
> -            if ( !(nmap & 0xfffff) )
> -                process_pending_softirqs();
> -        } while ( ((page = page_list_next(page, &d->page_list)) != NULL)
> -                  && nmap );
> -        ASSERT(nmap == 0);
> -        if ( page == NULL )
> -            break;
> -    }
> -
> -    ASSERT(nr_holes == 0);
> -}
> -
>  static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
>  {
>      struct e820entry *entry, *entry_guest;
> @@ -676,12 +541,6 @@ static __init void pvh_setup_e820(struct domain *d, unsigned long nr_pages)
>  static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
>                                     unsigned long mfn, unsigned long vphysmap_s)
>  {
> -    if ( is_pvh_domain(d) )
> -    {
> -        int rc = guest_physmap_add_page(d, _gfn(pfn), _mfn(mfn), 0);
> -        BUG_ON(rc);
> -        return;
> -    }
>      if ( !is_pv_32bit_domain(d) )
>          ((unsigned long *)vphysmap_s)[pfn] = mfn;
>      else
> @@ -690,78 +549,6 @@ static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
>      set_gpfn_from_mfn(mfn, pfn);
>  }
> 
> -/* Replace mfns with pfns in dom0 page tables */
> -static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
> -                                                 unsigned long v_start,
> -                                                 unsigned long v_end)
> -{
> -    int i, j, k;
> -    l4_pgentry_t *pl4e, *l4start;
> -    l3_pgentry_t *pl3e;
> -    l2_pgentry_t *pl2e;
> -    l1_pgentry_t *pl1e;
> -    unsigned long cr3_pfn;
> -
> -    ASSERT(paging_mode_enabled(v->domain));
> -
> -    l4start = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
> -
> -    /* Clear entries prior to guest L4 start */
> -    pl4e = l4start + l4_table_offset(v_start);
> -    memset(l4start, 0, (unsigned long)pl4e - (unsigned long)l4start);
> -
> -    for ( ; pl4e <= l4start + l4_table_offset(v_end - 1); pl4e++ )
> -    {
> -        pl3e = map_l3t_from_l4e(*pl4e);
> -        for ( i = 0; i < PAGE_SIZE / sizeof(*pl3e); i++, pl3e++ )
> -        {
> -            if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
> -                continue;
> -
> -            pl2e = map_l2t_from_l3e(*pl3e);
> -            for ( j = 0; j < PAGE_SIZE / sizeof(*pl2e); j++, pl2e++ )
> -            {
> -                if ( !(l2e_get_flags(*pl2e)  & _PAGE_PRESENT) )
> -                    continue;
> -
> -                pl1e = map_l1t_from_l2e(*pl2e);
> -                for ( k = 0; k < PAGE_SIZE / sizeof(*pl1e); k++, pl1e++ )
> -                {
> -                    if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
> -                        continue;
> -
> -                    *pl1e = l1e_from_pfn(get_gpfn_from_mfn(l1e_get_pfn(*pl1e)),
> -                                         l1e_get_flags(*pl1e));
> -                }
> -                unmap_domain_page(pl1e);
> -                *pl2e = l2e_from_pfn(get_gpfn_from_mfn(l2e_get_pfn(*pl2e)),
> -                                     l2e_get_flags(*pl2e));
> -            }
> -            unmap_domain_page(pl2e);
> -            *pl3e = l3e_from_pfn(get_gpfn_from_mfn(l3e_get_pfn(*pl3e)),
> -                                 l3e_get_flags(*pl3e));
> -        }
> -        unmap_domain_page(pl3e);
> -        *pl4e = l4e_from_pfn(get_gpfn_from_mfn(l4e_get_pfn(*pl4e)),
> -                             l4e_get_flags(*pl4e));
> -    }
> -
> -    /* Clear entries post guest L4. */
> -    if ( (unsigned long)pl4e & (PAGE_SIZE - 1) )
> -        memset(pl4e, 0, PAGE_SIZE - ((unsigned long)pl4e & (PAGE_SIZE - 1)));
> -
> -    unmap_domain_page(l4start);
> -
> -    cr3_pfn = get_gpfn_from_mfn(paddr_to_pfn(v->arch.cr3));
> -    v->arch.hvm_vcpu.guest_cr[3] = pfn_to_paddr(cr3_pfn);
> -
> -    /*
> -     * Finally, we update the paging modes (hap_update_paging_modes). This will
> -     * create monitor_table for us, update v->arch.cr3, and update vmcs.cr3.
> -     */
> -    paging_update_paging_modes(v);
> -}
> -
>  static __init void mark_pv_pt_pages_rdonly(struct domain *d,
>                                             l4_pgentry_t *l4start,
>                                             unsigned long vpt_start,
> @@ -1053,8 +840,6 @@ static int __init construct_dom0_pv(
>      l3_pgentry_t *l3tab = NULL, *l3start = NULL;
>      l2_pgentry_t *l2tab = NULL, *l2start = NULL;
>      l1_pgentry_t *l1tab = NULL, *l1start = NULL;
> -    paddr_t shared_info_paddr = 0;
> -    u32 save_pvh_pg_mode = 0;
> 
>      /*
>       * This fully describes the memory layout of the initial domain. All
> @@ -1135,13 +920,6 @@ static int __init construct_dom0_pv(
>              rc = -EINVAL;
>              goto out;
>          }
> -        if ( is_pvh_domain(d) &&
> -             !test_bit(XENFEAT_hvm_callback_vector, parms.f_supported) )
> -        {
> -            printk("Kernel does not support PVH mode\n");
> -            rc = -EINVAL;
> -            goto out;
> -        }
>      }
> 
>      if ( compat32 )
> @@ -1207,12 +985,6 @@ static int __init construct_dom0_pv(
>                          sizeof(struct start_info) +
>                          sizeof(struct dom0_vga_console_info));
> 
> -    if ( is_pvh_domain(d) )
> -    {
> -        shared_info_paddr = round_pgup(vstartinfo_end) - v_start;
> -        vstartinfo_end   += PAGE_SIZE;
> -    }
> -
>      vpt_start        = round_pgup(vstartinfo_end);
>      for ( nr_pt_pages = 2; ; nr_pt_pages++ )
>      {
> @@ -1458,11 +1230,6 @@ static int __init construct_dom0_pv(
>          setup_dom0_vcpu(d, i, cpu);
>      }
> 
> -    /*
> -     * pvh: we temporarily disable d->arch.paging.mode so that we can build
> cr3
> -     * needed to run on dom0's page tables.
> -     */
> -    save_pvh_pg_mode = d->arch.paging.mode;
>      d->arch.paging.mode = 0;
> 
>      /* Set up CR3 value for write_ptbase */
> @@ -1532,25 +1299,6 @@ static int __init construct_dom0_pv(
>                           nr_pages);
>      }
> 
> -    /*
> -     * We enable paging mode again so guest_physmap_add_page and
> -     * paging_set_allocation will do the right thing for us.
> -     */
> -    d->arch.paging.mode = save_pvh_pg_mode;
> -
> -    if ( is_pvh_domain(d) )
> -    {
> -        bool preempted;
> -
> -        do {
> -            preempted = false;
> -            paging_set_allocation(d, dom0_paging_pages(d, nr_pages),
> -                                  &preempted);
> -            process_pending_softirqs();
> -        } while ( preempted );
> -    }
> -
> -
>      /* Write the phys->machine and machine->phys table entries. */
>      for ( pfn = 0; pfn < count; pfn++ )
>      {
> @@ -1628,15 +1376,6 @@ static int __init construct_dom0_pv(
>          si->console.dom0.info_size = sizeof(struct dom0_vga_console_info);
>      }
> 
> -    /*
> -     * PVH: We need to update si->shared_info while we are on dom0 page tables,
> -     * but need to defer the p2m update until after we have fixed up the
> -     * page tables for PVH so that the m2p for the si pte entry returns
> -     * correct pfn.
> -     */
> -    if ( is_pvh_domain(d) )
> -        si->shared_info = shared_info_paddr;
> -
>      if ( is_pv_32bit_domain(d) )
>          xlat_start_info(si, XLAT_start_info_console_dom0);
> 
> @@ -1670,16 +1409,8 @@ static int __init construct_dom0_pv(
>      regs->_eflags = X86_EFLAGS_IF;
> 
>  #ifdef CONFIG_SHADOW_PAGING
> -    if ( opt_dom0_shadow )
> -    {
> -        if ( is_pvh_domain(d) )
> -        {
> -            printk("Unsupported option dom0_shadow for PVH\n");
> -            return -EINVAL;
> -        }
> -        if ( paging_enable(d, PG_SH_enable) == 0 )
> -            paging_update_paging_modes(v);
> -    }
> +    if ( opt_dom0_shadow && paging_enable(d, PG_SH_enable) == 0 )
> +        paging_update_paging_modes(v);
>  #endif
> 
>      /*
> @@ -1696,20 +1427,6 @@ static int __init construct_dom0_pv(
>          printk(" Xen warning: dom0 kernel broken ELF: %s\n",
>                 elf_check_broken(&elf));
> 
> -    if ( is_pvh_domain(d) )
> -    {
> -        /* finally, fixup the page table, replacing mfns with pfns */
> -        pvh_fixup_page_tables_for_hap(v, v_start, v_end);
> -
> -        /* the pt has correct pfn for si, now update the mfn in the p2m */
> -        mfn = virt_to_mfn(d->shared_info);
> -        pfn = shared_info_paddr >> PAGE_SHIFT;
> -        dom0_update_physmap(d, pfn, mfn, 0);
> -
> -        pvh_map_all_iomem(d, nr_pages);
> -        pvh_setup_e820(d, nr_pages);
> -    }
> -
>      if ( d->domain_id == hardware_domid )
>          iommu_hwdom_init(d);
> 
> diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
> index a3dd276..2c1893d 100644
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -618,9 +618,8 @@ long arch_do_domctl(
>          break;
> 
>      case XEN_DOMCTL_get_address_size:
> -        domctl->u.address_size.size =
> -            (is_pv_32bit_domain(d) || is_pvh_32bit_domain(d)) ?
> -            32 : BITS_PER_LONG;
> +        domctl->u.address_size.size = is_pv_32bit_domain(d) ? 32 :
> +                                                              BITS_PER_LONG;
>          copyback = 1;
>          break;
> 
> @@ -1493,7 +1492,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
>  {
>      unsigned int i;
>      const struct domain *d = v->domain;
> -    bool_t compat = is_pv_32bit_domain(d) || is_pvh_32bit_domain(d);
> +    bool_t compat = is_pv_32bit_domain(d);
>  #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
> 
>      if ( !is_pv_domain(d) )
> diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
> index 9eeb0a2..b95b500 100644
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -180,9 +180,6 @@ static int __init hvm_enable(void)
>          printk("\n");
>      }
> 
> -    if ( !fns->pvh_supported )
> -        printk(XENLOG_INFO "HVM: PVH mode not supported on this platform\n");
> -
>      if ( !opt_altp2m_enabled )
>          hvm_funcs.altp2m_supported = 0;
> 
> @@ -431,10 +428,6 @@ u64 hvm_get_guest_tsc_fixed(struct vcpu *v, uint64_t at_tsc)
> 
>  void hvm_migrate_timers(struct vcpu *v)
>  {
> -    /* PVH doesn't use rtc and emulated timers, it uses pvclock mechanism. */
> -    if ( is_pvh_vcpu(v) )
> -        return;
> -
>      rtc_migrate_timers(v);
>      pt_migrate(v);
>  }
> @@ -594,19 +587,6 @@ static int hvm_print_line(
>      return X86EMUL_OKAY;
>  }
> 
> -static int handle_pvh_io(
> -    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
> -{
> -    struct domain *currd = current->domain;
> -
> -    if ( dir == IOREQ_WRITE )
> -        guest_io_write(port, bytes, *val, currd);
> -    else
> -        *val = guest_io_read(port, bytes, currd);
> -
> -    return X86EMUL_OKAY;
> -}
> -
>  int hvm_domain_initialise(struct domain *d)
>  {
>      int rc;
> @@ -618,22 +598,6 @@ int hvm_domain_initialise(struct domain *d)
>          return -EINVAL;
>      }
> 
> -    if ( is_pvh_domain(d) )
> -    {
> -        if ( !hvm_funcs.pvh_supported )
> -        {
> -            printk(XENLOG_G_WARNING "Attempt to create a PVH guest "
> -                   "on a system without necessary hardware support\n");
> -            return -EINVAL;
> -        }
> -        if ( !hap_enabled(d) )
> -        {
> -            printk(XENLOG_G_INFO "PVH guest must have HAP on\n");
> -            return -EINVAL;
> -        }
> -
> -    }
> -
>      spin_lock_init(&d->arch.hvm_domain.irq_lock);
>      spin_lock_init(&d->arch.hvm_domain.uc_lock);
>      spin_lock_init(&d->arch.hvm_domain.write_map.lock);
> @@ -675,12 +639,6 @@ int hvm_domain_initialise(struct domain *d)
> 
>      hvm_ioreq_init(d);
> 
> -    if ( is_pvh_domain(d) )
> -    {
> -        register_portio_handler(d, 0, 0x10003, handle_pvh_io);
> -        return 0;
> -    }
> -
>      hvm_init_guest_time(d);
> 
>      d->arch.hvm_domain.params[HVM_PARAM_TRIPLE_FAULT_REASON] = SHUTDOWN_reboot;
> @@ -723,9 +681,6 @@ int hvm_domain_initialise(struct domain *d)
> 
>  void hvm_domain_relinquish_resources(struct domain *d)
>  {
> -    if ( is_pvh_domain(d) )
> -        return;
> -
>      if ( hvm_funcs.nhvm_domain_relinquish_resources )
>          hvm_funcs.nhvm_domain_relinquish_resources(d);
> 
> @@ -754,9 +709,6 @@ void hvm_domain_destroy(struct domain *d)
> 
>      hvm_destroy_cacheattr_region_list(d);
> 
> -    if ( is_pvh_domain(d) )
> -        return;
> -
>      hvm_funcs.domain_destroy(d);
>      rtc_deinit(d);
>      stdvga_deinit(d);
> @@ -1525,13 +1477,6 @@ int hvm_vcpu_initialise(struct vcpu *v)
> 
>      v->arch.hvm_vcpu.inject_event.vector = HVM_EVENT_VECTOR_UNSET;
> 
> -    if ( is_pvh_domain(d) )
> -    {
> -        /* This is for hvm_long_mode_enabled(v). */
> -        v->arch.hvm_vcpu.guest_efer = EFER_LMA | EFER_LME;
> -        return 0;
> -    }
> -
>      rc = setup_compat_arg_xlat(v); /* teardown: free_compat_arg_xlat() */
>      if ( rc != 0 )
>          goto fail4;
> @@ -1869,9 +1814,6 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
>              __put_gfn(hostp2m, gfn);
> 
>          rc = 0;
> -        if ( unlikely(is_pvh_domain(currd)) )
> -            goto out;
> -
>          if ( !handle_mmio_with_translation(gla, gpa >> PAGE_SHIFT, npfec) )
>              hvm_inject_hw_exception(TRAP_gp_fault, 0);
>          rc = 1;
> @@ -2211,15 +2153,6 @@ int hvm_set_cr0(unsigned long value, bool_t may_defer)
>           (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
>          goto gpf;
> 
> -    /* A pvh is not expected to change to real mode. */
> -    if ( is_pvh_domain(d) &&
> -         (value & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PG | X86_CR0_PE) )
> -    {
> -        printk(XENLOG_G_WARNING
> -               "PVH attempting to turn off PE/PG. CR0:%lx\n", value);
> -        goto gpf;
> -    }
> -
>      if ( may_defer && unlikely(v->domain->arch.monitor.write_ctrlreg_enabled &
>                                 monitor_ctrlreg_bitmask(VM_EVENT_X86_CR0)) )
>      {
> @@ -2384,11 +2317,6 @@ int hvm_set_cr4(unsigned long value, bool_t may_defer)
>                          "EFER.LMA is set");
>              goto gpf;
>          }
> -        if ( is_pvh_vcpu(v) )
> -        {
> -            HVM_DBG_LOG(DBG_LEVEL_1, "32-bit PVH guest cleared CR4.PAE");
> -            goto gpf;
> -        }
>      }
> 
>      old_cr = v->arch.hvm_vcpu.guest_cr[4];
> @@ -3542,8 +3470,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
>          break;
> 
>      case MSR_IA32_APICBASE:
> -        if ( unlikely(is_pvh_vcpu(v)) ||
> -             !vlapic_msr_set(vcpu_vlapic(v), msr_content) )
> +        if ( !vlapic_msr_set(vcpu_vlapic(v), msr_content) )
>              goto gp_fault;
>          break;
> 
> @@ -4066,8 +3993,7 @@ static int hvmop_set_param(
>          return -ESRCH;
> 
>      rc = -EINVAL;
> -    if ( !has_hvm_container_domain(d) ||
> -         (is_pvh_domain(d) && (a.index != HVM_PARAM_CALLBACK_IRQ)) )
> +    if ( !has_hvm_container_domain(d) )
>          goto out;
> 
>      rc = hvm_allow_set_param(d, &a);
> @@ -4322,8 +4248,7 @@ static int hvmop_get_param(
>          return -ESRCH;
> 
>      rc = -EINVAL;
> -    if ( !has_hvm_container_domain(d) ||
> -         (is_pvh_domain(d) && (a.index != HVM_PARAM_CALLBACK_IRQ)) )
> +    if ( !has_hvm_container_domain(d) )
>          goto out;
> 
>      rc = hvm_allow_get_param(d, &a);
> diff --git a/xen/arch/x86/hvm/hypercall.c b/xen/arch/x86/hvm/hypercall.c
> index 6499caa..8cc7cc6 100644
> --- a/xen/arch/x86/hvm/hypercall.c
> +++ b/xen/arch/x86/hvm/hypercall.c
> @@ -78,7 +78,7 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>      switch ( cmd )
>      {
>      default:
> -        if ( !is_pvh_vcpu(curr) || !is_hardware_domain(curr->domain) )
> +        if ( !is_hardware_domain(curr->domain) )
>              return -ENOSYS;
>          /* fall through */
>      case PHYSDEVOP_map_pirq:
> @@ -86,7 +86,7 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>      case PHYSDEVOP_eoi:
>      case PHYSDEVOP_irq_status_query:
>      case PHYSDEVOP_get_free_pirq:
> -        if ( !has_pirq(curr->domain) && !is_pvh_vcpu(curr) )
> +        if ( !has_pirq(curr->domain) )
>              return -ENOSYS;
>          break;
>      }
> diff --git a/xen/arch/x86/hvm/io.c b/xen/arch/x86/hvm/io.c
> index 205fb68..5016300 100644
> --- a/xen/arch/x86/hvm/io.c
> +++ b/xen/arch/x86/hvm/io.c
> @@ -84,8 +84,6 @@ bool hvm_emulate_one_insn(hvm_emulate_validate_t *validate)
>      struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
>      int rc;
> 
> -    ASSERT(!is_pvh_vcpu(curr));
> -
>      hvm_emulate_init_once(&ctxt, validate, guest_cpu_user_regs());
> 
>      rc = hvm_emulate_one(&ctxt);
> diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
> index ebb3eca..ad2edad 100644
> --- a/xen/arch/x86/hvm/ioreq.c
> +++ b/xen/arch/x86/hvm/ioreq.c
> @@ -1387,8 +1387,7 @@ void hvm_ioreq_init(struct domain *d)
>      spin_lock_init(&d->arch.hvm_domain.ioreq_server.lock);
>      INIT_LIST_HEAD(&d->arch.hvm_domain.ioreq_server.list);
> 
> -    if ( !is_pvh_domain(d) )
> -        register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
> +    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
>  }
> 
>  /*
> diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
> index ff7d288..760544b 100644
> --- a/xen/arch/x86/hvm/irq.c
> +++ b/xen/arch/x86/hvm/irq.c
> @@ -423,9 +423,6 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v)
>           && vcpu_info(v, evtchn_upcall_pending) )
>          return hvm_intack_vector(plat->irq.callback_via.vector);
> 
> -    if ( is_pvh_vcpu(v) )
> -        return hvm_intack_none;
> -
>      if ( vlapic_accept_pic_intr(v) && plat->vpic[0].int_output )
>          return hvm_intack_pic(0);
> 
> diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
> index 03e68ad..f98a8b7 100644
> --- a/xen/arch/x86/hvm/vmx/vmcs.c
> +++ b/xen/arch/x86/hvm/vmx/vmcs.c
> @@ -1068,20 +1068,6 @@ static int construct_vmcs(struct vcpu *v)
>                    vmx_pin_based_exec_control & ~PIN_BASED_POSTED_INTERRUPT);
>      }
> 
> -    if ( is_pvh_domain(d) )
> -    {
> -        /* Unrestricted guest (real mode for EPT) */
> -        v->arch.hvm_vmx.secondary_exec_control &=
> -            ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
> -
> -        /* Start in 64-bit mode. PVH 32bitfixme. */
> -        vmentry_ctl |= VM_ENTRY_IA32E_MODE;       /* GUEST_EFER.LME/LMA ignored */
> -
> -        ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
> -        ASSERT(v->arch.hvm_vmx.exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP);
> -        ASSERT(!(v->arch.hvm_vmx.exec_control & CPU_BASED_RDTSC_EXITING));
> -    }
> -
>      vmx_update_cpu_exec_control(v);
> 
>      __vmwrite(VM_EXIT_CONTROLS, vmexit_ctl);
> @@ -1217,11 +1203,7 @@ static int construct_vmcs(struct vcpu *v)
>      __vmwrite(GUEST_DS_AR_BYTES, 0xc093);
>      __vmwrite(GUEST_FS_AR_BYTES, 0xc093);
>      __vmwrite(GUEST_GS_AR_BYTES, 0xc093);
> -    if ( is_pvh_domain(d) )
> -        /* CS.L == 1, exec, read/write, accessed. */
> -        __vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
> -    else
> -        __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
> +    __vmwrite(GUEST_CS_AR_BYTES, 0xc09b); /* exec/read, accessed */
> 
>      /* Guest IDT. */
>      __vmwrite(GUEST_IDTR_BASE, 0);
> @@ -1251,23 +1233,10 @@ static int construct_vmcs(struct vcpu *v)
>                | (1U << TRAP_no_device);
>      vmx_update_exception_bitmap(v);
> 
> -    /*
> -     * In HVM domains, this happens on the realmode->paging
> -     * transition.  Since PVH never goes through this transition, we
> -     * need to do it at start-of-day.
> -     */
> -    if ( is_pvh_domain(d) )
> -        vmx_update_debug_state(v);
> -
>      v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
> -
> -    /* PVH domains always start in paging mode */
> -    if ( is_pvh_domain(d) )
> -        v->arch.hvm_vcpu.guest_cr[0] |= X86_CR0_PG;
> -
>      hvm_update_guest_cr(v, 0);
> 
> -    v->arch.hvm_vcpu.guest_cr[4] = is_pvh_domain(d) ? X86_CR4_PAE : 0;
> +    v->arch.hvm_vcpu.guest_cr[4] = 0;
>      hvm_update_guest_cr(v, 4);
> 
>      if ( cpu_has_vmx_tpr_shadow )
> diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
> index 60b7c92..8404b03 100644
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -2063,9 +2063,6 @@ static int vmx_set_mode(struct vcpu *v, int mode)
>  {
>      unsigned long attr;
> 
> -    if ( !is_pvh_vcpu(v) )
> -        return 0;
> -
>      ASSERT((mode == 4) || (mode == 8));
> 
>      attr = (mode == 4) ? 0xc09b : 0xa09b;
> @@ -2300,12 +2297,6 @@ const struct hvm_function_table * __init start_vmx(void)
>          vmx_function_table.sync_pir_to_irr = NULL;
>      }
> 
> -    if ( cpu_has_vmx_ept
> -         && cpu_has_vmx_pat
> -         && cpu_has_vmx_msr_bitmap
> -         && cpu_has_vmx_secondary_exec_control )
> -        vmx_function_table.pvh_supported = 1;
> -
>      if ( cpu_has_vmx_tsc_scaling )
>          vmx_function_table.tsc_scaling.ratio_frac_bits = 48;
> 
> @@ -3800,8 +3791,7 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
>          if ( exit_qualification & 0x10 )
>          {
>              /* INS, OUTS */
> -            if ( unlikely(is_pvh_vcpu(v)) /* PVH fixme */ ||
> -                 !hvm_emulate_one_insn(x86_insn_is_portio) )
> +            if ( !hvm_emulate_one_insn(x86_insn_is_portio) )
>                  hvm_inject_hw_exception(TRAP_gp_fault, 0);
>          }
>          else
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index 1661e66..12dabcf 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -3041,7 +3041,7 @@ static struct domain *get_pg_owner(domid_t domid)
>          goto out;
>      }
> 
> -    if ( !is_pvh_domain(curr) && unlikely(paging_mode_translate(curr)) )
> +    if ( unlikely(paging_mode_translate(curr)) )
>      {
>          MEM_LOG("Cannot mix foreign mappings with translated domains");
>          goto out;
> diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
> index bbfa54e..07e2ccd 100644
> --- a/xen/arch/x86/mm/p2m-pt.c
> +++ b/xen/arch/x86/mm/p2m-pt.c
> @@ -532,7 +532,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
> 
>      if ( unlikely(p2m_is_foreign(p2mt)) )
>      {
> -        /* pvh fixme: foreign types are only supported on ept at present */
> +        /* hvm fixme: foreign types are only supported on ept at present */
>          gdprintk(XENLOG_WARNING, "Unimplemented foreign p2m type.\n");
>          return -EINVAL;
>      }
> diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
> index 2eee9cd..a5651a3 100644
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -589,7 +589,7 @@ int p2m_alloc_table(struct p2m_domain *p2m)
>  }
> 
>  /*
> - * pvh fixme: when adding support for pvh non-hardware domains, this path must
> + * hvm fixme: when adding support for pvh non-hardware domains, this path must
>   * cleanup any foreign p2m types (release refcnts on them).
>   */
>  void p2m_teardown(struct p2m_domain *p2m)
> @@ -2411,10 +2411,10 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
>      struct domain *fdom;
> 
>      ASSERT(tdom);
> -    if ( foreigndom == DOMID_SELF || !is_pvh_domain(tdom) )
> +    if ( foreigndom == DOMID_SELF )
>          return -EINVAL;
>      /*
> -     * pvh fixme: until support is added to p2m teardown code to cleanup any
> +     * hvm fixme: until support is added to p2m teardown code to cleanup any
>       * foreign entries, limit this to hardware domain only.
>       */
>      if ( !is_hardware_domain(tdom) )
> diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
> index fc45bfb..81cd6c9 100644
> --- a/xen/arch/x86/physdev.c
> +++ b/xen/arch/x86/physdev.c
> @@ -517,10 +517,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          struct vcpu *curr = current;
>          struct physdev_set_iopl set_iopl;
> 
> -        ret = -ENOSYS;
> -        if ( is_pvh_vcpu(curr) )
> -            break;
> -
>          ret = -EFAULT;
>          if ( copy_from_guest(&set_iopl, arg, 1) != 0 )
>              break;
> @@ -536,10 +532,6 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>          struct vcpu *curr = current;
>          struct physdev_set_iobitmap set_iobitmap;
> 
> -        ret = -ENOSYS;
> -        if ( is_pvh_vcpu(curr) )
> -            break;
> -
>          ret = -EFAULT;
>          if ( copy_from_guest(&set_iobitmap, arg, 1) != 0 )
>              break;
> diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
> index dab67d5..4a807b8 100644
> --- a/xen/arch/x86/setup.c
> +++ b/xen/arch/x86/setup.c
> @@ -62,10 +62,6 @@ integer_param("maxcpus", max_cpus);
> 
>  unsigned long __read_mostly cr4_pv32_mask;
> 
> -/* Boot dom0 in pvh mode */
> -static bool_t __initdata opt_dom0pvh;
> -boolean_param("dom0pvh", opt_dom0pvh);
> -
>  /* **** Linux config option: propagated to domain0. */
>  /* "acpi=off":    Sisables both ACPI table parsing and interpreter. */
>  /* "acpi=force":  Override the disable blacklist.                   */
> @@ -1545,9 +1541,6 @@ void __init noreturn __start_xen(unsigned long mbi_p)
> 
>      init_guest_cpuid();
> 
> -    if ( opt_dom0pvh )
> -        domcr_flags |= DOMCRF_pvh | DOMCRF_hap;
> -
>      if ( dom0_pvh )
>      {
>          domcr_flags |= DOMCRF_hvm |
> diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
> index 3ad2ab0..b739dc8 100644
> --- a/xen/arch/x86/time.c
> +++ b/xen/arch/x86/time.c
> @@ -2013,33 +2013,6 @@ void tsc_set_info(struct domain *d,
>          d->arch.vtsc = 0;
>          return;
>      }
> -    if ( is_pvh_domain(d) )
> -    {
> -        /*
> -         * PVH fixme: support more tsc modes.
> -         *
> -         * NB: The reason this is disabled here appears to be with
> -         * additional support required to do the PV RDTSC emulation.
> -         * Since we're no longer taking the PV emulation path for
> -         * anything, we may be able to remove this restriction.
> -         *
> -         * pvhfixme: Experiments show that "default" works for PVH,
> -         * but "always_emulate" does not for some reason.  Figure out
> -         * why.
> -         */
> -        switch ( tsc_mode )
> -        {
> -        case TSC_MODE_NEVER_EMULATE:
> -            break;
> -        default:
> -            printk(XENLOG_WARNING
> -                   "PVH currently does not support tsc emulation. Setting timer_mode = never_emulate\n");
> -            /* FALLTHRU */
> -        case TSC_MODE_DEFAULT:
> -            tsc_mode = TSC_MODE_NEVER_EMULATE;
> -            break;
> -        }
> -    }
> 
>      switch ( d->arch.tsc_mode = tsc_mode )
>      {
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 4492c9c..b22aacc 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -304,8 +304,6 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
> 
>      if ( domcr_flags & DOMCRF_hvm )
>          d->guest_type = guest_type_hvm;
> -    else if ( domcr_flags & DOMCRF_pvh )
> -        d->guest_type = guest_type_pvh;
>      else
>          d->guest_type = guest_type_pv;
> 
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index 93e3029..951a5dc 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -194,9 +194,6 @@ void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
>      case guest_type_hvm:
>          info->flags |= XEN_DOMINF_hvm_guest;
>          break;
> -    case guest_type_pvh:
> -        info->flags |= XEN_DOMINF_pvh_guest;
> -        break;
>      default:
>          break;
>      }
> @@ -501,7 +498,6 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>          ret = -EINVAL;
>          if ( (op->u.createdomain.flags &
>               ~(XEN_DOMCTL_CDF_hvm_guest
> -               | XEN_DOMCTL_CDF_pvh_guest
>                 | XEN_DOMCTL_CDF_hap
>                 | XEN_DOMCTL_CDF_s3_integrity
>                 | XEN_DOMCTL_CDF_oos_off
> @@ -532,15 +528,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
>              rover = dom;
>          }
> 
> -        if ( (op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest)
> -             && (op->u.createdomain.flags & XEN_DOMCTL_CDF_pvh_guest) )
> -            return -EINVAL;
> -
>          domcr_flags = 0;
>          if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hvm_guest )
>              domcr_flags |= DOMCRF_hvm;
> -        if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_pvh_guest )
> -            domcr_flags |= DOMCRF_pvh;
>          if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_hap )
>              domcr_flags |= DOMCRF_hap;
>          if ( op->u.createdomain.flags & XEN_DOMCTL_CDF_s3_integrity )
> diff --git a/xen/common/kernel.c b/xen/common/kernel.c
> index 4b87c60..a4ae612 100644
> --- a/xen/common/kernel.c
> +++ b/xen/common/kernel.c
> @@ -324,11 +324,6 @@ DO(xen_version)(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
>                               (1U << XENFEAT_highmem_assist) |
>                               (1U << XENFEAT_gnttab_map_avail_bits);
>                  break;
> -            case guest_type_pvh:
> -                fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
> -                             (1U << XENFEAT_supervisor_mode_kernel) |
> -                             (1U << XENFEAT_hvm_callback_vector);
> -                break;
>              case guest_type_hvm:
>                  fi.submap |= (1U << XENFEAT_hvm_safe_pvclock) |
>                               (1U << XENFEAT_hvm_callback_vector) |
> diff --git a/xen/common/vm_event.c b/xen/common/vm_event.c
> index 45046d1..0fe9a53 100644
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -606,8 +606,8 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
>              struct p2m_domain *p2m = p2m_get_hostp2m(d);
> 
>              rc = -EOPNOTSUPP;
> -            /* pvh fixme: p2m_is_foreign types need addressing */
> -            if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
> +            /* hvm fixme: p2m_is_foreign types need addressing */
> +            if ( is_hvm_domain(hardware_domain) )
>                  break;
> 
>              rc = -ENODEV;
> @@ -707,8 +707,8 @@ int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
>          {
>          case XEN_VM_EVENT_ENABLE:
>              rc = -EOPNOTSUPP;
> -            /* pvh fixme: p2m_is_foreign types need addressing */
> -            if ( is_pvh_vcpu(current) || is_pvh_domain(hardware_domain) )
> +            /* hvm fixme: p2m_is_foreign types need addressing */
> +            if ( is_hvm_domain(hardware_domain) )
>                  break;
> 
>              rc = -ENODEV;
> diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
> index 1beef2f..8f0edaf 100644
> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -15,7 +15,6 @@
>  #define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
>  #define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
>  #define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
> -#define is_pvh_32bit_domain(d) (is_pvh_domain(d) && has_32bit_shinfo(d))
> 
>  #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
>          d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
> diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
> index 87b203a..8c8c633 100644
> --- a/xen/include/asm-x86/hvm/hvm.h
> +++ b/xen/include/asm-x86/hvm/hvm.h
> @@ -91,9 +91,6 @@ struct hvm_function_table {
>      /* Support Hardware-Assisted Paging? */
>      bool_t hap_supported;
> 
> -    /* Necessary hardware support for PVH mode? */
> -    bool_t pvh_supported;
> -
>      /* Necessary hardware support for alternate p2m's? */
>      bool altp2m_supported;
> 
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 85cbb7c..9e3ce21 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -37,7 +37,7 @@
>  #include "hvm/save.h"
>  #include "memory.h"
> 
> -#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000c
> +#define XEN_DOMCTL_INTERFACE_VERSION 0x0000000d
> 
>  /*
>   * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
> @@ -60,11 +60,8 @@ struct xen_domctl_createdomain {
>   /* Disable out-of-sync shadow page tables? */
>  #define _XEN_DOMCTL_CDF_oos_off       3
>  #define XEN_DOMCTL_CDF_oos_off        (1U<<_XEN_DOMCTL_CDF_oos_off)
> - /* Is this a PVH guest (as opposed to an HVM or PV guest)? */
> -#define _XEN_DOMCTL_CDF_pvh_guest     4
> -#define XEN_DOMCTL_CDF_pvh_guest      (1U<<_XEN_DOMCTL_CDF_pvh_guest)
>   /* Is this a xenstore domain? */
> -#define _XEN_DOMCTL_CDF_xs_domain     5
> +#define _XEN_DOMCTL_CDF_xs_domain     4
>  #define XEN_DOMCTL_CDF_xs_domain      (1U<<_XEN_DOMCTL_CDF_xs_domain)
>      uint32_t flags;
>      struct xen_arch_domainconfig config;
> @@ -97,14 +94,11 @@ struct xen_domctl_getdomaininfo {
>   /* Being debugged.  */
>  #define _XEN_DOMINF_debugged  6
>  #define XEN_DOMINF_debugged   (1U<<_XEN_DOMINF_debugged)
> -/* domain is PVH */
> -#define _XEN_DOMINF_pvh_guest 7
> -#define XEN_DOMINF_pvh_guest  (1U<<_XEN_DOMINF_pvh_guest)
>  /* domain is a xenstore domain */
> -#define _XEN_DOMINF_xs_domain 8
> +#define _XEN_DOMINF_xs_domain 7
>  #define XEN_DOMINF_xs_domain  (1U<<_XEN_DOMINF_xs_domain)
>  /* domain has hardware assisted paging */
> -#define _XEN_DOMINF_hap       9
> +#define _XEN_DOMINF_hap       8
>  #define XEN_DOMINF_hap        (1U<<_XEN_DOMINF_hap)
>   /* XEN_DOMINF_shutdown guest-supplied code.  */
>  #define XEN_DOMINF_shutdownmask 255
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 0929c0b..cc11999 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -312,7 +312,7 @@ struct evtchn_port_ops;
>   * will be false, but has_hvm_container_* checks will be true.
>   */
>  enum guest_type {
> -    guest_type_pv, guest_type_pvh, guest_type_hvm
> +    guest_type_pv, guest_type_hvm
>  };
> 
>  struct domain
> @@ -555,11 +555,8 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
>   /* DOMCRF_oos_off: dont use out-of-sync optimization for shadow page tables */
>  #define _DOMCRF_oos_off         4
>  #define DOMCRF_oos_off          (1U<<_DOMCRF_oos_off)
> - /* DOMCRF_pvh: Create PV domain in HVM container. */
> -#define _DOMCRF_pvh             5
> -#define DOMCRF_pvh              (1U<<_DOMCRF_pvh)
>   /* DOMCRF_xs_domain: xenstore domain */
> -#define _DOMCRF_xs_domain       6
> +#define _DOMCRF_xs_domain       5
>  #define DOMCRF_xs_domain        (1U<<_DOMCRF_xs_domain)
> 
>  /*
> @@ -875,8 +872,6 @@ void watchdog_domain_destroy(struct domain *d);
> 
>  #define is_pv_domain(d) ((d)->guest_type == guest_type_pv)
>  #define is_pv_vcpu(v)   (is_pv_domain((v)->domain))
> -#define is_pvh_domain(d) ((d)->guest_type == guest_type_pvh)
> -#define is_pvh_vcpu(v)   (is_pvh_domain((v)->domain))
>  #define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm)
>  #define is_hvm_vcpu(v)   (is_hvm_domain(v->domain))
>  #define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
> --
> 2.10.1 (Apple Git-78)

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel

 

