[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen master] docs: honour XEN_DUMP_DIR



commit d7f64ff4e08c885c610faff8fcf77c127dee78bb
Author:     Wei Liu <wei.liu2@xxxxxxxxxx>
AuthorDate: Mon Jun 13 08:49:07 2016 +0100
Commit:     Wei Liu <wei.liu2@xxxxxxxxxx>
CommitDate: Fri Jun 17 11:27:21 2016 +0100

    docs: honour XEN_DUMP_DIR
    
    Use configure to generate xl.cfg and xl manpage. Add the generated files
    to gitignore.
    
    Signed-off-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Acked-by: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>
---
 .gitignore               |    2 +
 docs/configure           |  184 ++++-
 docs/configure.ac        |    9 +-
 docs/man/xl.cfg.pod.5    | 2029 ----------------------------------------------
 docs/man/xl.cfg.pod.5.in | 2029 ++++++++++++++++++++++++++++++++++++++++++++++
 docs/man/xl.pod.1        | 1770 ----------------------------------------
 docs/man/xl.pod.1.in     | 1770 ++++++++++++++++++++++++++++++++++++++++
 7 files changed, 3992 insertions(+), 3801 deletions(-)

diff --git a/.gitignore b/.gitignore
index 496194f..8e0a177 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,6 +41,8 @@ config/Paths.mk
 build-*
 dist/*
 docs/html/
+docs/man/xl.cfg.pod.5
+docs/man/xl.pod.1
 docs/man1/
 docs/man5/
 docs/man8/
diff --git a/docs/configure b/docs/configure
index fbb78ac..46f0e68 100755
--- a/docs/configure
+++ b/docs/configure
@@ -594,6 +594,24 @@ POD2TEXT
 POD2HTML
 POD2MAN
 FIG2DEV
+XEN_DUMP_DIR
+XEN_PAGING_DIR
+XEN_LOCK_DIR
+XEN_SCRIPT_DIR
+XEN_CONFIG_DIR
+INITD_DIR
+CONFIG_DIR
+SHAREDIR
+XEN_LIB_DIR
+XEN_LIB_STORED
+XEN_LOG_DIR
+XEN_RUN_DIR
+XENFIRMWAREDIR
+LIBEXEC_INC
+LIBEXEC_LIB
+LIBEXEC_BIN
+LIBEXEC
+CONFIG_LEAF_DIR
 target_alias
 host_alias
 build_alias
@@ -635,6 +653,10 @@ SHELL'
 ac_subst_files=''
 ac_user_opts='
 enable_option_checking
+with_initddir
+with_sysconfig_leaf_dir
+with_libexec_leaf_dir
+with_xen_dumpdir
 '
       ac_precious_vars='build_alias
 host_alias
@@ -1251,6 +1273,21 @@ if test -n "$ac_init_help"; then
    esac
   cat <<\_ACEOF
 
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-initddir=DIR     Path to directory with sysv runlevel scripts.
+                          [SYSCONFDIR/init.d]
+  --with-sysconfig-leaf-dir=SUBDIR
+                          Name of subdirectory in /etc to store runtime
+                          options for runlevel scripts and daemons such as
+                          xenstored. This should be either "sysconfig" or
+                          "default". [sysconfig]
+  --with-libexec-leaf-dir=SUBDIR
+                          Name of subdirectory in libexecdir to use.
+  --with-xen-dumpdir=DIR  Path to directory for domU crash dumps.
+                          [LOCALSTATEDIR/lib/xen/dump]
+
 Some influential environment variables:
   FIG2DEV     Path to fig2dev tool
   POD2MAN     Path to pod2man tool
@@ -1693,7 +1730,7 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
 
 
 
-ac_config_files="$ac_config_files ../config/Docs.mk"
+ac_config_files="$ac_config_files ../config/Docs.mk man/xl.cfg.pod.5 
man/xl.pod.1"
 
 ac_aux_dir=
 for ac_dir in ../ "$srcdir"/../; do
@@ -1741,6 +1778,149 @@ ac_configure="$SHELL $ac_aux_dir/configure"  # Please 
don't use this var.
 
 
 
+
+test "x$prefix" = "xNONE" && prefix=$ac_default_prefix
+test "x$exec_prefix" = "xNONE" && exec_prefix=${prefix}
+
+if test "$localstatedir" = '${prefix}/var' ; then
+    localstatedir=/var
+fi
+
+bindir=`eval echo $bindir`
+sbindir=`eval echo $sbindir`
+libdir=`eval echo $libdir`
+
+if test "x$sysconfdir" = 'x${prefix}/etc' ; then
+    case "$host_os" in
+         *freebsd*)
+         sysconfdir=$prefix/etc
+         ;;
+         *solaris*)
+         if test "$prefix" = "/usr" ; then
+             sysconfdir=/etc
+         else
+             sysconfdir=$prefix/etc
+         fi
+         ;;
+         *)
+         sysconfdir=/etc
+         ;;
+    esac
+fi
+
+
+# Check whether --with-initddir was given.
+if test "${with_initddir+set}" = set; then :
+  withval=$with_initddir; initddir_path=$withval
+else
+  case "$host_os" in
+         *linux*)
+         if test -d $sysconfdir/rc.d/init.d ; then
+             initddir_path=$sysconfdir/rc.d/init.d
+         else
+             initddir_path=$sysconfdir/init.d
+         fi
+         ;;
+         *)
+         initddir_path=$sysconfdir/rc.d
+         ;;
+     esac
+fi
+
+
+
+# Check whether --with-sysconfig-leaf-dir was given.
+if test "${with_sysconfig_leaf_dir+set}" = set; then :
+  withval=$with_sysconfig_leaf_dir; config_leaf_dir=$withval
+else
+  config_leaf_dir=sysconfig
+    if test ! -d /etc/sysconfig ; then config_leaf_dir=default ; fi
+fi
+
+CONFIG_LEAF_DIR=$config_leaf_dir
+
+
+
+# Check whether --with-libexec-leaf-dir was given.
+if test "${with_libexec_leaf_dir+set}" = set; then :
+  withval=$with_libexec_leaf_dir; libexec_subdir=$withval
+else
+  libexec_subdir=$PACKAGE_TARNAME
+fi
+
+
+
+# Check whether --with-xen-dumpdir was given.
+if test "${with_xen_dumpdir+set}" = set; then :
+  withval=$with_xen_dumpdir; xen_dumpdir_path=$withval
+else
+  xen_dumpdir_path=$localstatedir/lib/xen/dump
+fi
+
+
+if test "$libexecdir" = '${exec_prefix}/libexec' ; then
+    case "$host_os" in
+         *netbsd*) ;;
+         *)
+         libexecdir='${exec_prefix}/lib'
+         ;;
+    esac
+fi
+LIBEXEC=`eval echo $libexecdir/$libexec_subdir`
+
+
+LIBEXEC_BIN=${LIBEXEC}/bin
+
+LIBEXEC_LIB=${LIBEXEC}/lib
+
+LIBEXEC_INC=${LIBEXEC}/include
+
+XENFIRMWAREDIR=${LIBEXEC}/boot
+
+
+XEN_RUN_DIR=$localstatedir/run/xen
+
+
+XEN_LOG_DIR=$localstatedir/log/xen
+
+
+XEN_LIB_STORED=$localstatedir/lib/xenstored
+
+
+XEN_LIB_DIR=$localstatedir/lib/xen
+
+
+SHAREDIR=$prefix/share
+
+
+CONFIG_DIR=$sysconfdir
+
+
+INITD_DIR=$initddir_path
+
+
+XEN_CONFIG_DIR=$CONFIG_DIR/xen
+
+
+XEN_SCRIPT_DIR=$XEN_CONFIG_DIR/scripts
+
+
+case "$host_os" in
+*freebsd*) XEN_LOCK_DIR=$localstatedir/lib ;;
+*netbsd*) XEN_LOCK_DIR=$localstatedir/lib ;;
+*) XEN_LOCK_DIR=$localstatedir/lock ;;
+esac
+
+
+XEN_PAGING_DIR=$localstatedir/lib/xen/xenpaging
+
+
+XEN_DUMP_DIR=$xen_dumpdir_path
+
+
+
+
+
     # Extract the first word of "fig2dev", so it can be a program name with 
args.
 set dummy fig2dev; ac_word=$2
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
@@ -2793,6 +2973,8 @@ for ac_config_target in $ac_config_targets
 do
   case $ac_config_target in
     "../config/Docs.mk") CONFIG_FILES="$CONFIG_FILES ../config/Docs.mk" ;;
+    "man/xl.cfg.pod.5") CONFIG_FILES="$CONFIG_FILES man/xl.cfg.pod.5" ;;
+    "man/xl.pod.1") CONFIG_FILES="$CONFIG_FILES man/xl.pod.1" ;;
 
   *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
   esac
diff --git a/docs/configure.ac b/docs/configure.ac
index bc77f49..a2929c4 100644
--- a/docs/configure.ac
+++ b/docs/configure.ac
@@ -5,13 +5,20 @@ AC_PREREQ([2.67])
 AC_INIT([Xen Hypervisor Documentation], m4_esyscmd([../version.sh 
../xen/Makefile]),
     [xen-devel@xxxxxxxxxxxxx], [xen], [http://www.xen.org/])
 AC_CONFIG_SRCDIR([misc/xen-command-line.markdown])
-AC_CONFIG_FILES([../config/Docs.mk])
+AC_CONFIG_FILES([
+../config/Docs.mk
+man/xl.cfg.pod.5
+man/xl.pod.1
+])
 AC_CONFIG_AUX_DIR([../])
 
 # M4 Macro includes
 m4_include([../m4/docs_tool.m4])
 m4_include([../m4/path_or_fail.m4])
 m4_include([../m4/features.m4])
+m4_include([../m4/paths.m4])
+
+AX_XEN_EXPAND_CONFIG()
 
 AX_DOCS_TOOL_PROG([FIG2DEV], [fig2dev])
 AX_DOCS_TOOL_PROG([POD2MAN], [pod2man])
diff --git a/docs/man/xl.cfg.pod.5 b/docs/man/xl.cfg.pod.5
deleted file mode 100644
index 4a8bf51..0000000
--- a/docs/man/xl.cfg.pod.5
+++ /dev/null
@@ -1,2029 +0,0 @@
-=head1 NAME
-
-xl.cfg - XL Domain Configuration File Syntax
-
-=head1 SYNOPSIS
-
- /etc/xen/xldomain
-
-=head1 DESCRIPTION
-
-To create a VM (a domain in Xen terminology, sometimes called a guest)
-with xl requires the provision of a domain config file.  Typically
-these live in `/etc/xen/DOMAIN.cfg` where DOMAIN is the name of the
-domain.
-
-=head1 SYNTAX
-
-A domain config file consists of a series of C<KEY=VALUE> pairs.
-
-Some C<KEY>s are mandatory, others are general options which apply to
-any guest type while others relate only to specific guest types
-(e.g. PV or HVM guests).
-
-A value C<VALUE> is one of:
-
-=over 4
-
-=item B<"STRING">
-
-A string, surrounded by either single or double quotes.
-
-=item B<NUMBER>
-
-A number, in either decimal, octal (using a C<0> prefix) or
-hexadecimal (using an C<0x> prefix).
-
-=item B<BOOLEAN>
-
-A C<NUMBER> interpreted as C<False> (C<0>) or C<True> (any other
-value).
-
-=item B<[ VALUE, VALUE, ... ]>
-
-A list of C<VALUES> of the above types. Lists can be heterogeneous and
-nested.
-
-=back
-
-The semantics of each C<KEY> defines which form of C<VALUE> is required.
-
-Pairs may be separated either by a newline or a semicolon.  Both
-of the following are valid:
-
-  name="h0"
-  builder="hvm"
-
-  name="h0"; builder="hvm"
-
-=head1 OPTIONS
-
-=head2 Mandatory Configuration Items
-
-The following key is mandatory for any guest type:
-
-=over 4
-
-=item B<name="NAME">
-
-Specifies the name of the domain.  Names of domains existing on a
-single host must be unique.
-
-=back
-
-=head2 Selecting Guest Type
-
-=over 4
-
-=item B<builder="generic">
-
-Specifies that this is to be a PV domain. This is the default.
-
-=item B<builder="hvm">
-
-Specifies that this is to be an HVM domain.  That is, a fully
-virtualised computer with emulated BIOS, disk and network peripherals,
-etc.  The default is a PV domain, suitable for hosting Xen-aware guest
-operating systems.
-
-=back
-
-=head2 General Options
-
-The following options apply to guests of any type.
-
-=head3 CPU Allocation
-
-=over 4
-
-=item B<pool="CPUPOOLNAME">
-
-Put the guest's vcpus into the named cpu pool.
-
-=item B<vcpus=N>
-
-Start the guest with N vcpus initially online.
-
-=item B<maxvcpus=M>
-
-Allow the guest to bring up a maximum of M vcpus. At start of day if
-`vcpus=N` is less than `maxvcpus=M` then the first `N` vcpus will be
-created online and the remainder will be offline.
-
-=item B<cpus="CPU-LIST">
-
-List of which cpus the guest is allowed to use. Default is no pinning at
-all (more on this below). A C<CPU-LIST> may be specified as follows:
-
-=over 4
-
-=item "all"
-
-To allow all the vcpus of the guest to run on all the cpus on the host.
-
-=item "0-3,5,^1"
-
-To allow all the vcpus of the guest to run on cpus 0,2,3,5. Combining
-this with "all" is possible, meaning "all,^7" results in all the vcpus
-of the guest running on all the cpus on the host except cpu 7.
-
-=item "nodes:0-3,node:^2"
-
-To allow all the vcpus of the guest to run on the cpus from NUMA nodes
-0,1,3 of the host. So, if cpus 0-3 belongs to node 0, cpus 4-7 belongs
-to node 1 and cpus 8-11 to node 3, the above would mean all the vcpus
-of the guest will run on cpus 0-3,8-11.
-
-Combining this notation with the one above is possible. For instance,
-"1,node:2,^6", means all the vcpus of the guest will run on cpu 1 and
-on all the cpus of NUMA node 2, but not on cpu 6. Following the same
-example as above, that would be cpus 1,4,5,7.
-
-Combining this with "all" is also possible, meaning "all,^nodes:1"
-results in all the vcpus of the guest running on all the cpus on the
-host, except for the cpus belonging to the host NUMA node 1.
-
-=item ["2", "3-8,^5"]
-
-To ask for specific vcpu mapping. That means (in this example), vcpu 0
-of the guest will run on cpu 2 of the host and vcpu 1 of the guest will
-run on cpus 3,4,6,7,8 of the host.
-
-More complex notation can be also used, exactly as described above. So
-"all,^5-8", or just "all", or "node:0,node:2,^9-11,18-20" are all legal,
-for each element of the list.
-
-=back
-
-If this option is not specified, no vcpu to cpu pinning is established,
-and the vcpus of the guest can run on all the cpus of the host. If this
-option is specified, the intersection of the vcpu pinning mask, provided
-here, and the soft affinity mask, provided via B<cpus\_soft=> (if any),
-is utilized to compute the domain node-affinity, for driving memory
-allocations.
-
-=item B<cpus_soft="CPU-LIST">
-
-Exactly as B<cpus=>, but specifies soft affinity, rather than pinning
-(hard affinity). When using the credit scheduler, this means what cpus
-the vcpus of the domain prefer.
-
-A C<CPU-LIST> is specified exactly as above, for B<cpus=>.
-
-If this option is not specified, the vcpus of the guest will not have
-any preference regarding on what cpu to run. If this option is specified,
-the intersection of the soft affinity mask, provided here, and the vcpu
-pinning, provided via B<cpus=> (if any), is utilized to compute the
-domain node-affinity, for driving memory allocations.
-
-If this option is not specified (and B<cpus=> is not specified either),
-libxl automatically tries to place the guest on the least possible
-number of nodes. A heuristic approach is used for choosing the best
-node (or set of nodes), with the goal of maximizing performance for
-the guest and, at the same time, achieving efficient utilization of
-host cpus and memory. In that case, the soft affinity of all the vcpus
-of the domain will be set to the pcpus belonging to the NUMA nodes
-chosen during placement.
-
-For more details, see F<docs/misc/xl-numa-placement.markdown>.
-
-=back
-
-=head3 CPU Scheduling
-
-=over 4
-
-=item B<cpu_weight=WEIGHT>
-
-A domain with a weight of 512 will get twice as much CPU as a domain
-with a weight of 256 on a contended host.
-Legal weights range from 1 to 65535 and the default is 256.
-Honoured by the credit and credit2 schedulers.
-
-=item B<cap=N>
-
-The cap optionally fixes the maximum amount of CPU a domain will be
-able to consume, even if the host system has idle CPU cycles.
-The cap is expressed in percentage of one physical CPU:
-100 is 1 physical CPU, 50 is half a CPU, 400 is 4 CPUs, etc.
-The default, 0, means there is no upper cap.
-Honoured by the credit and credit2 schedulers.
-
-NB: Many systems have features that will scale down the computing
-power of a cpu that is not 100% utilized.  This can be in the
-operating system, but can also sometimes be below the operating system
-in the BIOS.  If you set a cap such that individual cores are running
-at less than 100%, this may have an impact on the performance of your
-workload over and above the impact of the cap. For example, if your
-processor runs at 2GHz, and you cap a vm at 50%, the power management
-system may also reduce the clock speed to 1GHz; the effect will be
-that your VM gets 25% of the available power (50% of 1GHz) rather than
-50% (50% of 2GHz).  If you are not getting the performance you expect,
-look at performance and cpufreq options in your operating system and
-your BIOS.
-
-=back
-
-=head3 Memory Allocation
-
-=over 4
-
-=item B<memory=MBYTES>
-
-Start the guest with MBYTES megabytes of RAM.
-
-=item B<maxmem=MBYTES>
-
-Specifies the maximum amount of memory a guest can ever see.
-The value of B<maxmem=> must be equal or greater than B<memory=>.
-
-In combination with B<memory=> it will start the guest "pre-ballooned",
-if the values of B<memory=> and B<maxmem=> differ.
-A "pre-ballooned" HVM guest needs a balloon driver, without a balloon driver
-it will crash.
-
-NOTE: Because of the way ballooning works, the guest has to allocate
-memory to keep track of maxmem pages, regardless of how much memory it
-actually has available to it.  A guest with maxmem=262144 and
-memory=8096 will report significantly less memory available for use
-than a system with maxmem=8096 memory=8096 due to the memory overhead
-of having to track the unused pages.
-
-=back
-
-=head3 Guest Virtual NUMA Configuration
-
-=over 4
-
-=item B<vnuma=[ VNODE_SPEC, VNODE_SPEC, ... ]>
-
-Specify virtual NUMA configuration with positional arguments. The
-nth B<VNODE_SPEC> in the list specifies the configuration of nth
-virtual node.
-
-Note that virtual NUMA for PV guest is not yet supported, because
-there is an issue with cpuid handling that affects PV virtual NUMA.
-Furthermore, guests with virtual NUMA cannot be saved or migrated
-because the migration stream does not preserve node information.
-
-Each B<VNODE_SPEC> is a list, which has a form of
-"[VNODE_CONFIG_OPTION,VNODE_CONFIG_OPTION, ... ]"  (without quotes).
-
-For example vnuma = [ ["pnode=0","size=512","vcpus=0-4","vdistances=10,20"] ]
-means vnode 0 is mapped to pnode 0, has 512MB ram, has vcpus 0 to 4, the
-distance to itself is 10 and the distance to vnode 1 is 20.
-
-Each B<VNODE_CONFIG_OPTION> is a quoted key=value pair. Supported
-B<VNODE_CONFIG_OPTION>s are (they are all mandatory at the moment):
-
-=over 4
-
-=item B<pnode=NUMBER>
-
-Specify which physical node this virtual node maps to.
-
-=item B<size=MBYTES>
-
-Specify the size of this virtual node. The sum of memory size of all
-vnodes will become B<maxmem=>. If B<maxmem=> is specified separately,
-a check is performed to make sure the sum of all vnode memory matches
-B<maxmem=>.
-
-=item B<vcpus=CPU-STRING>
-
-Specify which vcpus belong to this node. B<CPU-STRING> is a string
-separated by comma. You can specify range and single cpu. An example
-is "vcpus=0-5,8", which means you specify vcpu 0 to vcpu 5, and vcpu
-8.
-
-=item B<vdistances=NUMBER, NUMBER, ... >
-
-Specify virtual distance from this node to all nodes (including
-itself) with positional arguments. For example, "vdistance=10,20"
-for vnode 0 means the distance from vnode 0 to vnode 0 is 10, from
-vnode 0 to vnode 1 is 20. The number of arguments supplied must match
-the total number of vnodes.
-
-Normally you can use the values from "xl info -n" or "numactl
---hardware" to fill in vdistance list.
-
-=back
-
-=back
-
-=head3 Event Actions
-
-=over 4
-
-=item B<on_poweroff="ACTION">
-
-Specifies what should be done with the domain if it shuts itself down.
-The C<ACTION>s are:
-
-=over 4
-
-=item B<destroy>
-
-destroy the domain
-
-=item B<restart>
-
-destroy the domain and immediately create a new domain with the same
-configuration
-
-=item B<rename-restart>
-
-rename the domain which terminated, and then immediately create a new
-domain with the same configuration as the original
-
-=item B<preserve>
-
-keep the domain.  It can be examined, and later destroyed with `xl
-destroy`.
-
-=item B<coredump-destroy>
-
-write a "coredump" of the domain to F</var/lib/xen/dump/NAME> and then
-destroy the domain.
-
-=item B<coredump-restart>
-
-write a "coredump" of the domain to F</var/lib/xen/dump/NAME> and then
-restart the domain.
-
-=item B<soft-reset>
-
-Reset all Xen specific interfaces for the Xen-aware HVM domain allowing
-it to reestablish these interfaces and continue executing the domain. PV
-and non-Xen-aware HVM guests are not supported.
-
-=back
-
-The default for C<on_poweroff> is C<destroy>.
-
-=item B<on_reboot="ACTION">
-
-Action to take if the domain shuts down with a reason code requesting
-a reboot.  Default is C<restart>.
-
-=item B<on_watchdog="ACTION">
-
-Action to take if the domain shuts down due to a Xen watchdog timeout.
-Default is C<destroy>.
-
-=item B<on_crash="ACTION">
-
-Action to take if the domain crashes.  Default is C<destroy>.
-
-=item B<on_soft_reset="ACTION">
-
-Action to take if the domain performs 'soft reset' (e.g. does kexec).
-Default is C<soft-reset>.
-
-=back
-
-=head3 Direct Kernel Boot
-
-Direct kernel boot allows booting directly from a kernel and initrd
-stored in the host physical machine OS, allowing command line arguments
-to be passed directly. PV guest direct kernel boot is supported. HVM
-guest direct kernel boot is supported with limitation (it's supported
-when using qemu-xen and default BIOS 'seabios'; not supported in case of
-stubdom-dm and old rombios.)
-
-=over 4
-
-=item B<kernel="PATHNAME">
-
-Load the specified file as the kernel image.
-
-=item B<ramdisk="PATHNAME">
-
-Load the specified file as the ramdisk.
-
-=item B<cmdline="STRING">
-
-Append B<cmdline="STRING"> to the kernel command line. (Note: it is
-guest specific what meaning this has). It can replace B<root="STRING">
-plus B<extra="STRING"> and is preferred. When B<cmdline="STRING"> is set,
-B<root="STRING"> and B<extra="STRING"> will be ignored.
-
-=item B<root="STRING">
-
-Append B<root="STRING"> to the kernel command line (Note: it is guest
-specific what meaning this has).
-
-=item B<extra="STRING">
-
-Append B<STRING> to the kernel command line. (Note: it is guest
-specific what meaning this has).
-
-=back
-
-=head3 Other Options
-
-=over 4
-
-=item B<uuid="UUID">
-
-Specifies the UUID of the domain.  If not specified, a fresh unique
-UUID will be generated.
-
-=item B<seclabel="LABEL">
-
-Assign an XSM security label to this domain.
-
-=item B<init_seclabel="LABEL">
-
-Specify an XSM security label used for this domain temporarily during
-its build. The domain's XSM label will be changed to the execution
-seclabel (specified by "seclabel") once the build is complete, prior to
-unpausing the domain. With a properly constructed security policy (such
-as nomigrate_t in the example policy), this can be used to build a
-domain whose memory is not accessible to the toolstack domain.
-
-=item B<nomigrate=BOOLEAN>
-
-Disable migration of this domain.  This enables certain other features
-which are incompatible with migration. Currently this is limited to
-enabling the invariant TSC feature flag in cpuid results when TSC is
-not emulated.
-
-=item B<driver_domain=BOOLEAN>
-
-Specify that this domain is a driver domain. This enables certain
-features needed in order to run a driver domain.
-
-=item B<device_tree=PATH>
-
-Specify a partial device tree (compiled via the Device Tree Compiler).
-Everything under the node "/passthrough" will be copied into the guest
-device tree. For convenience, the node "/aliases" is also copied to allow
-the user to defined aliases which can be used by the guest kernel.
-
-Given the complexity of verifying the validity of a device tree, this
-option should only be used with trusted device tree.
-
-Note that the partial device tree should avoid to use the phandle 65000
-which is reserved by the toolstack.
-
-=back
-
-=head2 Devices
-
-The following options define the paravirtual, emulated and physical
-devices which the guest will contain.
-
-=over 4
-
-=item B<disk=[ "DISK_SPEC_STRING", "DISK_SPEC_STRING", ...]>
-
-Specifies the disks (both emulated disks and Xen virtual block
-devices) which are to be provided to the guest, and what objects on
-the they should map to.  See F<docs/misc/xl-disk-configuration.txt>.
-
-=item B<vif=[ "NET_SPEC_STRING", "NET_SPEC_STRING", ...]>
-
-Specifies the networking provision (both emulated network adapters,
-and Xen virtual interfaces) to provided to the guest.  See
-F<docs/misc/xl-network-configuration.markdown>.
-
-=item B<vtpm=[ "VTPM_SPEC_STRING", "VTPM_SPEC_STRING", ...]>
-
-Specifies the virtual trusted platform module to be
-provided to the guest. Please see F<docs/misc/vtpm.txt>
-for more details.
-
-Each B<VTPM_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
-settings, from the following list:
-
-=over 4
-
-=item C<backend=DOMAIN>
-
-Specify the backend domain name of id. This value is required!
-If this domain is a guest, the backend should be set to the
-vtpm domain name. If this domain is a vtpm, the
-backend should be set to the vtpm manager domain name.
-
-=item C<uuid=UUID>
-
-Specify the uuid of this vtpm device. The uuid is used to uniquely
-identify the vtpm device. You can create one using the uuidgen
-program on unix systems. If left unspecified, a new uuid
-will be randomly generated every time the domain boots.
-If this is a vtpm domain, you should specify a value. The
-value is optional if this is a guest domain.
-
-=back
-
-=item B<vfb=[ "VFB_SPEC_STRING", "VFB_SPEC_STRING", ...]>
-
-Specifies the paravirtual framebuffer devices which should be supplied
-to the domain.
-
-This option does not control the emulated graphics card presented to
-an HVM guest. See L<Emulated VGA Graphics Device> below for how to
-configure the emulated device. If L<Emulated VGA Graphics Device> options
-are used in a PV guest configuration, xl will pick up B<vnc>, B<vnclisten>,
-B<vncpasswd>, B<vncdisplay>, B<vncunused>, B<sdl>, B<opengl> and
-B<keymap> to construct paravirtual framebuffer device for the guest.
-
-Each B<VFB_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
-settings, from the following list:
-
-=over 4
-
-=item C<vnc=BOOLEAN>
-
-Allow access to the display via the VNC protocol.  This enables the
-other VNC-related settings.  The default is to enable this.
-
-=item C<vnclisten="ADDRESS[:DISPLAYNUM]">
-
-Specifies the IP address, and optionally VNC display number, to use.
-
-NB that if you specify the display number here, you should not use
-vncdisplay.
-
-=item C<vncdisplay=DISPLAYNUM>
-
-Specifies the VNC display number to use.  The actual TCP port number
-will be DISPLAYNUM+5900.
-
-NB that you should not use this option if you set the displaynum in the
-vnclisten string.
-
-=item C<vncunused=BOOLEAN>
-
-Requests that the VNC display setup search for a free TCP port to use.
-The actual display used can be accessed with C<xl vncviewer>.
-
-=item C<vncpasswd="PASSWORD">
-
-Specifies the password for the VNC server.
-
-=item C<sdl=BOOLEAN>
-
-Specifies that the display should be presented via an X window (using
-Simple DirectMedia Layer). The default is to not enable this mode.
-
-=item C<display=DISPLAY>
-
-Specifies the X Window display that should be used when the sdl option
-is used.
-
-=item C<xauthority=XAUTHORITY>
-
-Specifies the path to the X authority file that should be used to
-connect to the X server when the sdl option is used.
-
-=item C<opengl=BOOLEAN>
-
-Enable OpenGL acceleration of the SDL display. Only effects machines
-using C<device_model_version="qemu-xen-traditional"> and only if the
-device-model was compiled with OpenGL support. Disabled by default.
-
-=item C<keymap="LANG">
-
-Configure the keymap to use for the keyboard associated with this
-display. If the input method does not easily support raw keycodes
-(e.g. this is often the case when using VNC) then this allows us to
-correctly map the input keys into keycodes seen by the guest. The
-specific values which are accepted are defined by the version of the
-device-model which you are using. See L</"Keymaps"> below or consult the
-L<qemu(1)> manpage. The default is B<en-us>.
-
-=back
-
-=item B<channel=[ "CHANNEL_SPEC_STRING", "CHANNEL_SPEC_STRING", ...]>
-
-Specifies the virtual channels to be provided to the guest. A
-channel is a low-bandwidth, bidirectional byte stream, which resembles
-a serial link. Typical uses for channels include transmitting VM
-configuration after boot and signalling to in-guest agents. Please see
-F<docs/misc/channels.txt> for more details.
-
-Each B<CHANNEL_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
-seettings. Leading and trailing whitespace is ignored in both KEY and
-VALUE. Neither KEY nor VALUE may contain ',', '=' or '"'. Defined values
-are:
-
-=over 4
-
-=item C<backend=DOMAIN>
-
-Specify the backend domain name or id. This parameter is optional. If
-this parameter is omitted then the toolstack domain will be assumed.
-
-=item C<name=NAME>
-
-Specify the string name for this device. This parameter is mandatory.
-This should be a well-known name for the specific application (e.g.
-guest agent) and should be used by the frontend to connect the
-application to the right channel device. There is no formal registry
-of channel names, so application authors are encouraged to make their
-names unique by including domain name and version number in the string
-(e.g. org.mydomain.guestagent.1).
-
-=item C<connection=CONNECTION>
-
-Specify how the backend will be implemented. This following options are
-available:
-
-=over 4
-
-=item B<connection=SOCKET>
-
-The backend will bind a Unix domain socket (at the path given by
-B<path=PATH>), call listen and accept connections. The backend will proxy
-data between the channel and the connected socket.
-
-=item B<connection=PTY>
-
-The backend will create a pty and proxy data between the channel and the
-master device. The command B<xl channel-list> can be used to discover the
-assigned slave device.
-
-=back
-
-=back
-
-=item B<rdm="RDM_RESERVATION_STRING">
-
-(HVM/x86 only) Specifies information about Reserved Device Memory (RDM),
-which is necessary to enable robust device passthrough. One example of RDM
-is reported through ACPI Reserved Memory Region Reporting (RMRR) structure
-on x86 platform.
-
-B<RDM_RESERVE_STRING> has the form C<[KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<strategy="STRING">
-
-Currently there is only one valid type:
-
-"host" means all reserved device memory on this platform should be checked to
-reserve regions in this VM's guest address space. This global rdm parameter
-allows user to specify reserved regions explicitly, and using "host" includes
-all reserved regions reported on this platform, which is useful when doing
-hotplug.
-
-By default this isn't set so we don't check all rdms. Instead, we just check
-rdm specific to a given device if you're assigning this kind of device. Note
-this option is not recommended unless you can make sure any conflict does 
exist.
-
-For example, you're trying to set "memory = 2800" to allocate memory to one
-given VM but the platform owns two RDM regions like,
-
-Device A [sbdf_A]: RMRR region_A: base_addr ac6d3000 end_address ac6e6fff
-Device B [sbdf_B]: RMRR region_B: base_addr ad800000 end_address afffffff
-
-In this conflict case,
-
-#1. If B<strategy> is set to "host", for example,
-
-rdm = "strategy=host,policy=strict" or rdm = "strategy=host,policy=relaxed"
-
-It means all conflicts will be handled according to the policy
-introduced by B<policy> as described below.
-
-#2. If B<strategy> is not set at all, but
-
-pci = [ 'sbdf_A, rdm_policy=xxxxx' ]
-
-It means only one conflict of region_A will be handled according to the policy
-introduced by B<rdm_policy="STRING"> as described inside pci options.
-
-=item B<policy="STRING">
-
-Specifies how to deal with conflicts when reserving reserved device
-memory in guest address space.
-
-When that conflict is unsolved,
-
-"strict" means VM can't be created, or the associated device can't be
-attached in the case of hotplug.
-
-"relaxed" allows VM to be created but may cause VM to crash if
-pass-through device accesses RDM. For exampl,e Windows IGD GFX driver
-always accessed RDM regions so it leads to VM crash.
-
-Note this may be overridden by rdm_policy option in PCI device configuration.
-
-=back
-
-=back
-
-=item B<usbctrl=[ "USBCTRL_SPEC_STRING", "USBCTRL_SPEC_STRING", ... ]>
-
-Specifies the USB controllers created for this guest. Each
-B<USB_SPEC_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<type=TYPE>
-
-Specifies the usb controller type.
-
-"pv" denotes a kernel based pvusb backend.
-
-"qusb" specifies a qemu base backend for pvusb.
-
-"auto" (the default) determines whether a kernel based backend is installed.
-If this is the case, "pv" is selected, "qusb" will be selected if no kernel
-backend is currently available.
-
-=item B<version=VERSION>
-
-Specifies the usb controller version.  Possible values include
-1 (USB1.1) and 2 (USB2.0). Default is 2 (USB2.0).
-
-=item B<ports=PORTS>
-
-Specifies the total ports of the usb controller. The maximum
-number is 31. Default is 8.
-
-USB controler ids start from 0.  In line with the USB spec, however,
-ports on a controller start from 1.
-
-E.g.
-usbctrl=["version=1,ports=4", "version=2,ports=8",]
-The first controller has:
-controller id = 0, and port 1,2,3,4.
-The second controller has:
-controller id = 1, and port 1,2,3,4,5,6,7,8.
-
-=back
-
-=back
-
-=item B<usbdev=[ "USB_SPEC_STRING", "USB_SPEC_STRING", ... ]>
-
-Specifies the USB devices to be attached to the guest at boot. Each
-B<USB_SPEC_STRING> has the form C<KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<devtype=hostdev>
-
-Specifies USB device type. Currently only support 'hostdev'.
-
-=item B<hostbus=busnum>
-
-Specifies busnum of the USB device from the host perspective.
-
-=item B<hostaddr=devnum>
-
-Specifies devnum of the USB device from the host perspective.
-
-=item B<controller=CONTROLLER>
-
-Specifies USB controller id, to which controller the USB device is attached.
-
-=item B<port=PORT>
-
-Specifies USB port, to which port the USB device is attached. B<port=PORT>
-is valid only when B<controller=CONTROLLER> is specified.
-
-=back
-
-If no controller is specified, an available controller:port combination
-will be used.  If there are no available controller:port options,
-a new controller will be created.
-
-=back
-
-=item B<pci=[ "PCI_SPEC_STRING", "PCI_SPEC_STRING", ... ]>
-
-Specifies the host PCI devices to passthrough to this guest. Each 
B<PCI_SPEC_STRING>
-has the form C<[DDDD:]BB:DD.F[@VSLOT],KEY=VALUE,KEY=VALUE,...> where:
-
-=over 4
-
-=item B<DDDD:BB:DD.F>
-
-Identifies the PCI device from the host perspective in domain
-(B<DDDD>), Bus (B<BB>), Device (B<DD>) and Function (B<F>) syntax. This is
-the same scheme as used in the output of C<lspci> for the device in
-question. Note: By default C<lspci> will omit the domain (B<DDDD>) if it
-is zero and it is optional here also. You may specify the function
-(B<F>) as B<*> to indicate all functions.
-
-=item B<@VSLOT>
-
-Specifies the virtual device where the guest will see this
-device. This is equivalent to the B<DD> which the guest sees. In a
-guest B<DDDD> and B<BB> are C<0000:00>.
-
-=item B<KEY=VALUE>
-
-Possible B<KEY>s are:
-
-=over 4
-
-=item B<permissive=BOOLEAN>
-
-By default pciback only allows PV guests to write "known safe" values
-into PCI config space, likewise QEMU (both qemu-xen and
-qemu-traditional) imposes the same constraint on HVM guests. However
-many devices require writes to other areas of config space in order to
-operate properly.  This option tells the backend (pciback or QEMU) to
-allow all writes to PCI config space of this device by this domain.
-
-This option should be enabled with caution: it gives the guest much
-more control over the device, which may have security or stability
-implications.  It is recommended to enable this option only for
-trusted VMs under administrator control.
-
-=item B<msitranslate=BOOLEAN>
-
-Specifies that MSI-INTx translation should be turned on for the PCI
-device. When enabled, MSI-INTx translation will always enable MSI on
-the PCI device regardless whether the guest uses INTx or MSI. Some
-device drivers, such as NVIDIA's, detect an inconsistency and do not
-function when this option is enabled. Therefore the default is false (0).
-
-=item B<seize=BOOLEAN>
-
-Tells xl to automatically attempt to re-assign a device to
-pciback if it is not already assigned.
-
-WARNING: If you set this option, xl will gladly re-assign a critical
-system device, such as a network or a disk controller being used by
-dom0 without confirmation.  Please use with care.
-
-=item B<power_mgmt=BOOLEAN>
-
-(HVM only) Specifies that the VM should be able to program the
-D0-D3hot power management states for the PCI device. False (0) by
-default.
-
-=item B<rdm_policy="STRING">
-
-(HVM/x86 only) This is same as policy option inside the rdm option but
-just specific to a given device. Therefore the default is "relaxed" as
-same as policy option as well.
-
-Note this would override global B<rdm> option.
-
-=back
-
-=back
-
-=item B<pci_permissive=BOOLEAN>
-
-Changes the default value of 'permissive' for all PCI devices passed
-through to this VM. See L<permissive|/"permissive_boolean"> above.
-
-=item B<pci_msitranslate=BOOLEAN>
-
-Changes the default value of 'msitranslate' for all PCI devices passed
-through to this VM. See L<msitranslate|/"msitranslate_boolean"> above.
-
-=item B<pci_seize=BOOLEAN>
-
-Changes the default value of 'seize' for all PCI devices passed
-through to this VM. See L<seize|/"seize_boolean"> above.
-
-=item B<pci_power_mgmt=BOOLEAN>
-
-(HVM only) Changes the default value of 'power_mgmt' for all PCI
-devices passed through to this VM. See L<power_mgt|/"power_mgmt_boolean">
-above.
-
-=item B<gfx_passthru=BOOLEAN|"STRING">
-
-Enable graphics device PCI passthrough. This option makes an assigned
-PCI graphics card become primary graphics card in the VM. The QEMU
-emulated graphics adapter is disabled and the VNC console for the VM
-will not have any graphics output. All graphics output, including boot
-time QEMU BIOS messages from the VM, will go to the physical outputs
-of the passedthrough physical graphics card.
-
-The graphics card PCI device to passthrough is chosen with B<pci>
-option, exactly in the same way as normal Xen PCI device
-passthrough/assignment is done.  Note that gfx_passthru does not do
-any kind of sharing of the GPU, so you can only assign the GPU to one
-single VM at a time.
-
-gfx_passthru also enables various legacy VGA memory ranges, BARs, MMIOs,
-and ioports to be passed thru to the VM, since those are required
-for correct operation of things like VGA BIOS, text mode, VBE, etc.
-
-Enabling gfx_passthru option also copies the physical graphics card
-video BIOS to the guest memory, and executes the VBIOS in the guest
-to initialize the graphics card.
-
-Most graphics adapters require vendor specific tweaks for properly
-working graphics passthrough. See the XenVGAPassthroughTestedAdapters
-L<http://wiki.xen.org/wiki/XenVGAPassthroughTestedAdapters> wiki page
-for currently supported graphics cards for gfx_passthru.
-
-gfx_passthru is currently supported both with the qemu-xen-traditional
-device-model and upstream qemu-xen device-model.
-
-When given as a boolean the B<gfx_passthru> option either disables gfx
-passthru or enables autodetection.
-
-But when given as a string the B<gfx_passthru> option describes the type
-of device to enable. Note this behavior is only supported with the upstream
-qemu-xen device-model. With qemu-xen-traditional IGD is always assumed
-and other options than autodetect or explicit IGD will result in an error.
-
-Currently, valid options are:
-
-=over 4
-
-=item B<gfx_passthru=0>
-
-Disables graphics device PCI passthrough.
-
-=item B<gfx_passthru=1>, B<gfx_passthru="default">
-
-Enables graphics device PCI passthrough and autodetects the type of device
-which is being used.
-
-=item "igd"
-
-Enables graphics device PCI passthrough but forcing the type of device to
-Intel Graphics Device.
-
-=back
-
-Note that some graphics adapters (AMD/ATI cards, for example) do not
-necessarily require gfx_passthru option, so you can use the normal Xen
-PCI passthrough to assign the graphics card as a secondary graphics
-card to the VM. The QEMU-emulated graphics card remains the primary
-graphics card, and VNC output is available from the QEMU-emulated
-primary adapter.
-
-More information about Xen gfx_passthru feature is available
-on the XenVGAPassthrough L<http://wiki.xen.org/wiki/XenVGAPassthrough>
-wiki page.
-
-=item B<rdm_mem_boundary=MBYTES>
-
-Number of megabytes to set a boundary for checking rdm conflict.
-
-When RDM conflicts with RAM, RDM probably scatter the whole RAM space.
-Especially multiple RDM entries would worsen this to lead a complicated
-memory layout. So here we're trying to figure out a simple solution to
-avoid breaking existing layout. So when a conflict occurs,
-
-    #1. Above a predefined boundary
-        - move lowmem_end below reserved region to solve conflict;
-
-    #2. Below a predefined boundary
-        - Check strict/relaxed policy.
-        "strict" policy leads to fail libxl. Note when both policies
-        are specified on a given region, 'strict' is always preferred.
-        "relaxed" policy issues a warning message and also masks this
-        entry INVALID to indicate we shouldn't expose this entry to
-        hvmloader.
-
-Here the default is 2G.
-
-=item B<dtdev=[ "DTDEV_PATH", "DTDEV_PATH", ... ]>
-
-Specifies the host device tree nodes to passthrough to this guest. Each
-DTDEV_PATH is the absolute path in the device tree.
-
-=item B<ioports=[ "IOPORT_RANGE", "IOPORT_RANGE", ... ]>
-
-Allow guest to access specific legacy I/O ports. Each B<IOPORT_RANGE>
-is given in hexadecimal and may be either a span e.g. C<2f8-2ff>
-(inclusive) or a single I/O port C<2f8>.
-
-It is recommended to use this option only for trusted VMs under
-administrator control.
-
-=item B<iomem=[ "IOMEM_START,NUM_PAGES[@GFN]", "IOMEM_START,NUM_PAGES[@GFN]", 
... ]>
-
-Allow auto-translated domains to access specific hardware I/O memory pages.
-
-B<IOMEM_START> is a physical page number. B<NUM_PAGES> is the number of pages
-beginning with B<START_PAGE> to allow access. B<GFN> specifies the guest frame
-number where the mapping will start in the domU's address space. If B<GFN> is
-not given, the mapping will be performed using B<IOMEM_START> as a start in the
-domU's address space, therefore performing an 1:1 mapping as default.
-All of these values must be given in hexadecimal.
-
-Note that the IOMMU won't be updated with the mappings specified with this
-option. This option therefore should not be used to passthrough any
-IOMMU-protected device.
-
-It is recommended to use this option only for trusted VMs under
-administrator control.
-
-=item B<irqs=[ NUMBER, NUMBER, ... ]>
-
-Allow a guest to access specific physical IRQs.
-
-It is recommended to use this option only for trusted VMs under
-administrator control.
-
-=item B<max_event_channels=N>
-
-Limit the guest to using at most N event channels (PV interrupts).
-Guests use hypervisor resources for each event channel they use.
-
-The default of 1023 should be sufficient for typical guests.  The
-maximum value depends what the guest supports.  Guests supporting the
-FIFO-based event channel ABI support up to 131,071 event channels.
-Other guests are limited to 4095 (64-bit x86 and ARM) or 1023 (32-bit
-x86).
-
-=back
-
-=head2 Paravirtualised (PV) Guest Specific Options
-
-The following options apply only to Paravirtual guests.
-
-=over 4
-
-=item B<bootloader="PROGRAM">
-
-Run C<PROGRAM> to find the kernel image and ramdisk to use.  Normally
-C<PROGRAM> would be C<pygrub>, which is an emulation of
-grub/grub2/syslinux. Either B<kernel> or B<bootloader> must be specified
-for PV guests.
-
-=item B<bootloader_args=[ "ARG", "ARG", ...]>
-
-Append B<ARG>s to the arguments to the B<bootloader>
-program. Alternatively if the argument is a simple string then it will
-be split into words at whitespace (this second option is deprecated).
-
-=item B<e820_host=BOOLEAN>
-
-Selects whether to expose the host e820 (memory map) to the guest via
-the virtual e820. When this option is false (0) the guest pseudo-physical
-address space consists of a single contiguous RAM region. When this
-option is specified the virtual e820 instead reflects the host e820
-and contains the same PCI holes. The total amount of RAM represented
-by the memory map is always the same, this option configures only how
-it is laid out.
-
-Exposing the host e820 to the guest gives the guest kernel the
-opportunity to set aside the required part of its pseudo-physical
-address space in order to provide address space to map passedthrough
-PCI devices. It is guest Operating System dependent whether this
-option is required, specifically it is required when using a mainline
-Linux ("pvops") kernel. This option defaults to true (1) if any PCI
-passthrough devices are configured and false (0) otherwise. If you do not
-configure any passthrough devices at domain creation time but expect
-to hotplug devices later then you should set this option. Conversely
-if your particular guest kernel does not require this behaviour then
-it is safe to allow this to be enabled but you may wish to disable it
-anyway.
-
-=item B<pvh=BOOLEAN>
-
-Selects whether to run this PV guest in an HVM container. Default is 0.
-
-=back
-
-=head2 Fully-virtualised (HVM) Guest Specific Options
-
-The following options apply only to HVM guests.
-
-=head3 Boot Device
-
-=over 4
-
-=item B<boot=[c|d|n]>
-
-Selects the emulated virtual device to boot from. Options are hard
-disk (B<c>), cd-rom (B<d>) or network/PXE (B<n>). Multiple options can be
-given and will be attempted in the order they are given. e.g. to boot
-from cd-rom but fallback to the hard disk you can give B<dc>. The
-default is B<cd>.
-
-=back
-
-=head3 Emulated disk controller type
-
-=over 4
-
-=item B<hdtype="STRING">
-
-Select the hd disk type (ide|ahci).
-If hdtype=ahci adds ich9 disk controller in AHCI mode and uses it with
-upstream qemu to emulate disks instead of IDE. It decreases boot time
-but may not be supported by default in Windows xp and older Windows.
-The default is ide.
-
-=back
-
-=head3 Paging
-
-The following options control the mechanisms used to virtualise guest
-memory.  The defaults are selected to give the best results for the
-common case and so you should normally leave these options
-unspecified.
-
-=over 4
-
-=item B<hap=BOOLEAN>
-
-Turns "hardware assisted paging" (the use of the hardware nested page
-table feature) on or off.  This feature is called EPT (Extended Page
-Tables) by Intel and NPT (Nested Page Tables) or RVI (Rapid
-Virtualisation Indexing) by AMD.  Affects HVM guests only.  If turned
-off, Xen will run the guest in "shadow page table" mode where the
-guest's page table updates and/or TLB flushes etc. will be emulated.
-Use of HAP is the default when available.
-
-=item B<oos=BOOLEAN>
-
-Turns "out of sync pagetables" on or off.  When running in shadow page
-table mode, the guest's page table updates may be deferred as
-specified in the Intel/AMD architecture manuals.  However this may
-expose unexpected bugs in the guest, or find bugs in Xen, so it is
-possible to disable this feature.  Use of out of sync page tables,
-when Xen thinks it appropriate, is the default.
-
-=item B<shadow_memory=MBYTES>
-
-Number of megabytes to set aside for shadowing guest pagetable pages
-(effectively acting as a cache of translated pages) or to use for HAP
-state. By default this is 1MB per guest vcpu plus 8KB per MB of guest
-RAM. You should not normally need to adjust this value. However if you
-are not using hardware assisted paging (i.e. you are using shadow
-mode) and your guest workload consists of a very large number of
-similar processes then increasing this value may improve performance.
-
-=back
-
-=head3 Processor and Platform Features
-
-The following options allow various processor and platform level
-features to be hidden or exposed from the guest's point of view. This
-can be useful when running older guest Operating Systems which may
-misbehave when faced with more modern features. In general you should
-accept the defaults for these options wherever possible.
-
-=over 4
-
-=item B<bios="STRING">
-
-Select the virtual firmware that is exposed to the guest.
-By default, a guess is made based on the device model, but sometimes
-it may be useful to request a different one, like UEFI.
-
-=over 4
-
-=item B<rombios>
-
-Loads ROMBIOS, a 16-bit x86 compatible BIOS. This is used by default
-when device_model_version=qemu-xen-traditional. This is the only BIOS
-option supported when device_model_version=qemu-xen-traditional. This is
-the BIOS used by all previous Xen versions.
-
-=item B<seabios>
-
-Loads SeaBIOS, a 16-bit x86 compatible BIOS. This is used by default
-with device_model_version=qemu-xen.
-
-=item B<ovmf>
-
-Loads OVMF, a standard UEFI firmware by Tianocore project.
-Requires device_model_version=qemu-xen.
-
-=back
-
-=item B<pae=BOOLEAN>
-
-Hide or expose the IA32 Physical Address Extensions. These extensions
-make it possible for a 32 bit guest Operating System to access more
-than 4GB of RAM. Enabling PAE also enables other features such as
-NX. PAE is required if you wish to run a 64-bit guest Operating
-System. In general you should leave this enabled and allow the guest
-Operating System to choose whether or not to use PAE. (X86 only)
-
-=item B<acpi=BOOLEAN>
-
-Expose ACPI (Advanced Configuration and Power Interface) tables from
-the virtual firmware to the guest Operating System. ACPI is required
-by most modern guest Operating Systems. This option is enabled by
-default and usually you should omit it. However it may be necessary to
-disable ACPI for compatibility with some guest Operating Systems.
-
-=item B<acpi_s3=BOOLEAN>
-
-Include the S3 (suspend-to-ram) power state in the virtual firmware
-ACPI table. True (1) by default.
-
-=item B<acpi_s4=BOOLEAN>
-
-Include S4 (suspend-to-disk) power state in the virtual firmware ACPI
-table. True (1) by default.
-
-=item B<apic=BOOLEAN>
-
-Include information regarding APIC (Advanced Programmable Interrupt
-Controller) in the firmware/BIOS tables on a single processor
-guest. This causes the MP (multiprocessor) and PIR (PCI Interrupt
-Routing) tables to be exported by the virtual firmware. This option
-has no effect on a guest with multiple virtual CPUS as they must
-always include these tables. This option is enabled by default and you
-should usually omit it but it may be necessary to disable these
-firmware tables when using certain older guest Operating
-Systems. These tables have been superseded by newer constructs within
-the ACPI tables. (X86 only)
-
-=item B<nx=BOOLEAN>
-
-Hides or exposes the No-eXecute capability. This allows a guest
-Operating system to map pages such that they cannot be executed which
-can enhance security. This option requires that PAE also be
-enabled. (X86 only)
-
-=item B<hpet=BOOLEAN>
-
-Enables or disables HPET (High Precision Event Timer). This option is
-enabled by default and you should usually omit it. It may be necessary
-to disable the HPET in order to improve compatibility with guest
-Operating Systems (X86 only)
-
-=item B<altp2mhvm=BOOLEAN>
-
-Enables or disables hvm guest access to alternate-p2m capability.
-Alternate-p2m allows a guest to manage multiple p2m guest physical
-"memory views" (as opposed to a single p2m). This option is
-disabled by default and is available only to hvm domains.
-You may want this option if you want to access-control/isolate
-access to specific guest physical memory pages accessed by
-the guest, e.g. for HVM domain memory introspection or
-for isolation/access-control of memory between components within
-a single guest hvm domain.
-
-=item B<nestedhvm=BOOLEAN>
-
-Enables or disables guest access to hardware virtualisation features,
-e.g. it allows a guest Operating System to also function as a
-hypervisor. This option is disabled by default. You may want this
-option if you want to run another hypervisor (including another copy
-of Xen) within a Xen guest or to support a guest Operating System
-which uses hardware virtualisation extensions (e.g. Windows XP
-compatibility mode on more modern Windows OS).
-
-=item B<cpuid="LIBXL_STRING"> or B<cpuid=[ "XEND_STRING", "XEND_STRING" ]>
-
-Configure the value returned when a guest executes CPUID instruction.
-Two versions of config syntax are recognized: libxl and xend.
-
-The libxl syntax is a comma separated list of key=value pairs, preceded by the
-word "host". A few keys take a numerical value, all others take a single
-character which describes what to do with the feature bit.
-
-Possible values for a single feature bit:
-  '1' -> force the corresponding bit to 1
-  '0' -> force to 0
-  'x' -> Get a safe value (pass through and mask with the default policy)
-  'k' -> pass through the host bit value
-  's' -> as 'k' but preserve across save/restore and migration (not 
implemented)
-
-Note: when specifying B<cpuid> for hypervisor leaves (0x4000xxxx major group)
-only the lowest 8 bits of leaf's 0x4000xx00 EAX register are processed, the 
rest
-are ignored (these 8 bits signify maximum number of hypervisor leaves).
-
-List of keys taking a value:
-apicidsize brandid clflush family localapicid maxleaf maxhvleaf model nc
-proccount procpkg stepping
-
-List of keys taking a character:
-3dnow 3dnowext 3dnowprefetch abm acpi aes altmovcr8 apic avx clfsh cmov
-cmplegacy cmpxchg16 cmpxchg8 cntxid dca de ds dscpl dtes64 est extapic f16c
-ffxsr fma4 fpu fxsr htt hypervisor ia64 ibs lahfsahf lm lwp mca mce misalignsse
-mmx mmxext monitor movbe msr mtrr nodeid nx osvw osxsave pae page1gb pat pbe
-pclmulqdq pdcm pge popcnt pse pse36 psn rdtscp skinit smx ss sse sse2 sse3
-sse4_1 sse4_2 sse4a ssse3 svm svm_decode svm_lbrv svm_npt svm_nrips
-svm_pausefilt svm_tscrate svm_vmcbclean syscall sysenter tbm tm tm2 topoext tsc
-vme vmx wdt x2apic xop xsave xtpr
-
-The xend syntax is a list of values in the form of
-'leafnum:register=bitstring,register=bitstring'
-  "leafnum" is the requested function,
-  "register" is the response register to modify
-  "bitstring" represents all bits in the register, its length must be 32 chars.
-  Each successive character represent a lesser-significant bit, possible values
-  are listed above in the libxl section.
-
-Example to hide two features from the guest: 'tm', which is bit #29 in EDX, and
-'pni' (SSE3), which is bit #0 in ECX:
-
-xend: [ 
'1:ecx=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0,edx=xx0xxxxxxxxxxxxxxxxxxxxxxxxxxxxx' ]
-
-libxl: 'host,tm=0,sse3=0'
-
-More info about the CPUID instruction can be found in the processor manuals, 
and
-in Wikipedia: L<http://en.wikipedia.org/wiki/CPUID>
-
-=item B<acpi_firmware="STRING">
-
-Specify a path to a file that contains extra ACPI firmware tables to pass in to
-a guest. The file can contain several tables in their binary AML form
-concatenated together. Each table self describes its length so no additional
-information is needed. These tables will be added to the ACPI table set in the
-guest. Note that existing tables cannot be overridden by this feature. For
-example this cannot be used to override tables like DSDT, FADT, etc.
-
-=item B<smbios_firmware="STRING">
-
-Specify a path to a file that contains extra SMBIOS firmware structures to pass
-in to a guest. The file can contain a set DMTF predefined structures which will
-override the internal defaults. Not all predefined structures can be 
overridden,
-only the following types: 0, 1, 2, 3, 11, 22, 39. The file can also contain any
-number of vendor defined SMBIOS structures (type 128 - 255). Since SMBIOS
-structures do not present their overall size, each entry in the file must be
-preceded by a 32b integer indicating the size of the next structure.
-
-=item B<ms_vm_genid="OPTION">
-
-Provide a VM generation ID to the guest.
-
-The VM generation ID as a 128-bit random number that a guest may use
-to determine if the guest has been restored from an earlier snapshot
-or cloned.
-
-This is required for Microsoft Windows Server 2012 (and later) domain
-controllers.
-
-Valid options are:
-
-=over 4
-
-=item B<"generate">
-
-Generate a random VM generation ID every time the domain is created or
-restored.
-
-=item B<"none">
-
-Do not provide a VM generation ID.
-
-=back
-
-See also "Virtual Machine Generation ID" by Microsoft
-(http://www.microsoft.com/en-us/download/details.aspx?id=30707).
-
-=back 
-
-=head3 Guest Virtual Time Controls
-
-=over 4
-
-=item B<tsc_mode="MODE">
-
-Specifies how the TSC (Time Stamp Counter) should be provided to the
-guest (X86 only). Specifying this option as a number is
-deprecated. Options are:
-
-=over 4
-
-=item B<"default">
-
-Guest rdtsc/p is executed natively when monotonicity can be guaranteed
-and emulated otherwise (with frequency scaled if necessary).
-
-If a HVM container in B<default> TSC mode is created on a host that
-provides constant host TSC, its guest TSC frequency will be the same
-as the host. If it is later migrated to another host that provides
-constant host TSC and supports Intel VMX TSC scaling/AMD SVM TSC
-ratio, its guest TSC frequency will be the same before and after
-migration, and guest rdtsc/p will be executed natively as well after
-migration.
-
-=item B<"always_emulate">
-
-Guest rdtsc/p always emulated at 1GHz (kernel and user). Guest rdtsc/p
-always emulated and the virtual TSC will appear to increment (kernel
-and user) at a fixed 1GHz rate, regardless of the PCPU HZ rate or
-power state; Although there is an overhead associated with emulation
-this will NOT affect underlying CPU performance.
-
-=item B<"native">
-
-Guest rdtsc always executed natively (no monotonicity/frequency
-guarantees); guest rdtscp emulated at native frequency if unsupported
-by h/w, else executed natively.
-
-=item B<"native_paravirt">
-
-Same as B<native>, except xen manages TSC_AUX register so guest can
-determine when a restore/migration has occurred and assumes guest
-obtains/uses pvclock-like mechanism to adjust for monotonicity and
-frequency changes.
-
-If a HVM container in B<native_paravirt> TSC mode can execute both guest
-rdtsc and guest rdtscp natively, then the guest TSC frequency will be
-determined in the similar way to that of B<default> TSC mode.
-
-=back
-
-Please see F<docs/misc/tscmode.txt> for more information on this option.
-
-=item B<localtime=BOOLEAN>
-
-Set the real time clock to local time or to UTC. False (0) by default,
-i.e. set to UTC.
-
-=item B<rtc_timeoffset=SECONDS>
-
-Set the real time clock offset in seconds. False (0) by default.
-
-=item B<vpt_align=BOOLEAN>
-
-Specifies that periodic Virtual Platform Timers should be aligned to
-reduce guest interrupts. Enabling this option can reduce power
-consumption, especially when a guest uses a high timer interrupt
-frequency (HZ) values. The default is true (1).
-
-=item B<timer_mode=MODE>
-
-Specifies the mode for Virtual Timers. The valid values are as follows:
-
-=over 4
-
-=item B<"delay_for_missed_ticks">
-
-Delay for missed ticks. Do not advance a vcpu's time beyond the
-correct delivery time for interrupts that have been missed due to
-preemption. Deliver missed interrupts when the vcpu is rescheduled and
-advance the vcpu's virtual time stepwise for each one.
-
-=item B<"no_delay_for_missed_ticks">
-
-No delay for missed ticks. As above, missed interrupts are delivered,
-but guest time always tracks wallclock (i.e., real) time while doing
-so.
-
-=item B<"no_missed_ticks_pending">
-
-No missed interrupts are held pending. Instead, to ensure ticks are
-delivered at some non-zero rate, if we detect missed ticks then the
-internal tick alarm is not disabled if the VCPU is preempted during
-the next tick period.
-
-=item B<"one_missed_tick_pending">
-
-One missed tick pending. Missed interrupts are collapsed
-together and delivered as one 'late tick'.  Guest time always tracks
-wallclock (i.e., real) time.
-
-=back
-
-=back
-
-=head3 Memory layout
-
-=over 4
-
-=item B<mmio_hole=MBYTES>
-
-Specifies the size the MMIO hole below 4GiB will be.  Only valid for
-device_model_version = "qemu-xen".
-
-Cannot be smaller than 256. Cannot be larger than 3840.
-
-Known good large value is 3072.
-
-=back
-
-=head3 Support for Paravirtualisation of HVM Guests
-
-The following options allow Paravirtualised features (such as devices)
-to be exposed to the guest Operating System in an HVM guest.
-Utilising these features requires specific guest support but when
-available they will result in improved performance.
-
-=over 4
-
-=item B<xen_platform_pci=BOOLEAN>
-
-Enable or disable the Xen platform PCI device.  The presence of this
-virtual device enables a guest Operating System (subject to the
-availability of suitable drivers) to make use of paravirtualisation
-features such as disk and network devices etc. Enabling these drivers
-improves performance and is strongly recommended when available. PV
-drivers are available for various Operating Systems including HVM
-Linux L<http://wiki.xen.org/wiki/XenLinuxPVonHVMdrivers> and Microsoft
-Windows L<http://wiki.xen.org/wiki/XenWindowsGplPv>.
-
-Setting B<xen_platform_pci=0> with the default device_model "qemu-xen"
-requires at least QEMU 1.6.
-
-=item B<viridian=[ "GROUP", "GROUP", ...]>
-
-The groups of Microsoft Hyper-V (AKA viridian) compatible enlightenments
-exposed to the guest. The following groups of enlightenments may be
-specified:
-
-=over 4
-
-=item B<base>
-
-This group incorporates the Hypercall MSRs, Virtual processor index MSR,
-and APIC access MSRs. These enlightenments can improve performance of
-Windows Vista and Windows Server 2008 onwards and setting this option
-for such guests is strongly recommended.
-This group is also a pre-requisite for all others. If it is disabled
-then it is an error to attempt to enable any other group.
-
-=item B<freq>
-
-This group incorporates the TSC and APIC frequency MSRs. These
-enlightenments can improve performance of Windows 7 and Windows
-Server 2008 R2 onwards.
-
-=item B<time_ref_count>
-
-This group incorporates Partition Time Reference Counter MSR. This
-enlightenment can improve performance of Windows 8 and Windows
-Server 2012 onwards.
-
-=item B<reference_tsc>
-
-This set incorporates the Partition Reference TSC MSR. This
-enlightenment can improve performance of Windows 7 and Windows
-Server 2008 R2 onwards.
-
-=item B<hcall_remote_tlb_flush>
-
-This set incorporates use of hypercalls for remote TLB flushing.
-This enlightenment may improve performance of Windows guests running
-on hosts with higher levels of (physical) CPU contention.
-
-=item B<apic_assist>
-
-This set incorporates use of the APIC assist page to avoid EOI of
-the local APIC.
-This enlightenment may improve performance of guests that make use of
-per-vcpu event channel upcall vectors.
-Note that this enlightenment will have no effect if the guest is
-using APICv posted interrupts.
-
-=item B<defaults>
-
-This is a special value that enables the default set of groups, which
-is currently the B<base>, B<freq>, B<time_ref_count> and B<apic_assist>
-groups.
-
-=item B<all>
-
-This is a special value that enables all available groups.
-
-=back
-
-Groups can be disabled by prefixing the name with '!'. So, for example,
-to enable all groups except B<freq>, specify:
-
-=over 4
-
-B<viridian=[ "all", "!freq" ]>
-
-=back
-
-For details of the enlightenments see the latest version of Microsoft's
-Hypervisor Top-Level Functional Specification.
-
-The enlightenments should be harmless for other versions of Windows
-(although they will not give any benefit) and the majority of other
-non-Windows OSes.
-However it is known that they are incompatible with some other Operating
-Systems and in some circumstance can prevent Xen's own paravirtualisation
-interfaces for HVM guests from being used.
-
-The viridian option can be specified as a boolean. A value of true (1)
-is equivalent to the list [ "defaults" ], and a value of false (0) is
-equivalent to an empty list.
-
-=back
-
-=head3 Emulated VGA Graphics Device
-
-The following options control the features of the emulated graphics
-device.  Many of these options behave similarly to the equivalent key
-in the B<VFB_SPEC_STRING> for configuring virtual frame buffer devices
-(see above).
-
-=over 4
-
-=item B<videoram=MBYTES>
-
-Sets the amount of RAM which the emulated video card will contain,
-which in turn limits the resolutions and bit depths which will be
-available.
-
-When using the qemu-xen-traditional device-model, the default as well as
-minimum amount of video RAM for stdvga is 8 MB, which is sufficient for e.g.
-1600x1200 at 32bpp. For the upstream qemu-xen device-model, the default and
-minimum is 16 MB.
-
-When using the emulated Cirrus graphics card (B<vga="cirrus">) and the
-qemu-xen-traditional device-model, the amount of video RAM is fixed at 4 MB,
-which is sufficient for 1024x768 at 32 bpp. For the upstream qemu-xen
-device-model, the default and minimum is 8 MB.
-
-For B<qxl> vga, the default is both default and minimal 128MB.
-If B<videoram> is set less than 128MB, an error will be triggered.
-
-=item B<stdvga=BOOLEAN>
-
-Select a standard VGA card with VBE (VESA BIOS Extensions) as the
-emulated graphics device. The default is false (0) which means to emulate
-a Cirrus Logic GD5446 VGA card. If your guest supports VBE 2.0 or
-later (e.g. Windows XP onwards) then you should enable this.
-stdvga supports more video ram and bigger resolutions than Cirrus.
-This option is deprecated, use vga="stdvga" instead.
-
-=item B<vga="STRING">
-
-Selects the emulated video card (none|stdvga|cirrus|qxl).
-The default is cirrus.
-
-In general, QXL should work with the Spice remote display protocol
-for acceleration, and QXL driver is necessary in guest in this case.
-QXL can also work with the VNC protocol, but it will be like a standard
-VGA without acceleration.
-
-=item B<vnc=BOOLEAN>
-
-Allow access to the display via the VNC protocol.  This enables the
-other VNC-related settings.  The default is to enable this.
-
-=item B<vnclisten="ADDRESS[:DISPLAYNUM]">
-
-Specifies the IP address, and optionally VNC display number, to use.
-
-=item B<vncdisplay=DISPLAYNUM>
-
-Specifies the VNC display number to use. The actual TCP port number
-will be DISPLAYNUM+5900.
-
-=item B<vncunused=BOOLEAN>
-
-Requests that the VNC display setup search for a free TCP port to use.
-The actual display used can be accessed with C<xl vncviewer>.
-
-=item B<vncpasswd="PASSWORD">
-
-Specifies the password for the VNC server.
-
-=item B<keymap="LANG">
-
-Configure the keymap to use for the keyboard associated with this
-display. If the input method does not easily support raw keycodes
-(e.g. this is often the case when using VNC) then this allows us to
-correctly map the input keys into keycodes seen by the guest. The
-specific values which are accepted are defined by the version of the
-device-model which you are using. See L</"Keymaps"> below or consult the
-L<qemu(1)> manpage. The default is B<en-us>.
-
-=item B<sdl=BOOLEAN>
-
-Specifies that the display should be presented via an X window (using
-Simple DirectMedia Layer). The default is not to enable this mode.
-
-=item B<opengl=BOOLEAN>
-
-Enable OpenGL acceleration of the SDL display. Only affects machines
-using B<device_model_version="qemu-xen-traditional"> and only if the
-device-model was compiled with OpenGL support. False (0) by default.
-
-=item B<nographic=BOOLEAN>
-
-Enable or disable the virtual graphics device.  The default is to
-provide a VGA graphics device but this option can be used to disable
-it.
-
-=back
-
-=head3 Spice Graphics Support
-
-The following options control the features of SPICE.
-
-=over 4
-
-=item B<spice=BOOLEAN>
-
-Allow access to the display via the SPICE protocol.  This enables the
-other SPICE-related settings.
-
-=item B<spicehost="ADDRESS">
-
-Specify the interface address to listen on if given, otherwise any
-interface.
-
-=item B<spiceport=NUMBER>
-
-Specify the port to listen on by the SPICE server if the SPICE is
-enabled.
-
-=item B<spicetls_port=NUMBER>
-
-Specify the secure port to listen on by the SPICE server if the SPICE
-is enabled. At least one of the spiceport or spicetls_port must be
-given if SPICE is enabled.  NB. the options depending on spicetls_port
-have not been supported.
-
-=item B<spicedisable_ticketing=BOOLEAN>
-
-Enable client connection without password. When disabled, spicepasswd
-must be set. The default is false (0).
-
-=item B<spicepasswd="PASSWORD">
-
-Specify the ticket password which is used by a client for connection.
-
-=item B<spiceagent_mouse=BOOLEAN>
-
-Whether SPICE agent is used for client mouse mode. The default is true (1)
-(turn on)
-
-=item B<spicevdagent=BOOLEAN>
-
-Enables spice vdagent. The Spice vdagent is an optional component for
-enhancing user experience and performing guest-oriented management
-tasks. Its features includes: client mouse mode (no need to grab mouse
-by client, no mouse lag), automatic adjustment of screen resolution,
-copy and paste (text and image) between client and domU. It also
-requires vdagent service installed on domU o.s. to work. The default is 0.
-
-=item B<spice_clipboard_sharing=BOOLEAN>
-
-Enables Spice clipboard sharing (copy/paste). It requires spicevdagent
-enabled. The default is false (0).
-
-=item B<spiceusbredirection=NUMBER>
-
-Enables spice usbredirection. Creates NUMBER usbredirection channels
-for redirection of up to 4 usb devices from spice client to domU's qemu.
-It requires a usb controller and, if one is not defined, it will automatically
-add a usb2 controller. The default is disabled (0).
-
-=item B<spice_image_compression=[auto_glz|auto_lz|quic|glz|lz|off]>
-
-Specifies what image compression is to be used by spice (if given), otherwise
-the qemu default will be used. Please see documentations of your current qemu
-version for details.
-
-=item B<spice_streaming_video=[filter|all|off]>
-
-Specifies what streaming video setting is to be used by spice (if given),
-otherwise the qemu default will be used.
-
-=back
-
-=head3 Miscellaneous Emulated Hardware
-
-=over 4
-
-=item B<serial=[ "DEVICE", "DEVICE", ...]>
-
-Redirect virtual serial ports to B<DEVICE>s. Please see the
-B<-serial> option in the L<qemu(1)> manpage for details of the valid
-B<DEVICE> options. Default is B<vc> when in graphical mode and
-B<stdio> if B<nographics=1> is used.
-
-The form serial=DEVICE is also accepted for backwards compatibility.
-
-=item B<soundhw=DEVICE>
-
-Select the virtual sound card to expose to the guest. The valid
-devices are defined by the device model configuration, please see the
-L<qemu(1)> manpage for details. The default is not to export any sound
-device.
-
-=item B<usb=BOOLEAN>
-
-Enables or disables an emulated USB bus in the guest.
-
-=item B<usbversion=NUMBER>
-
-Specifies the type of an emulated USB bus in the guest. 1 for usb1,
-2 for usb2 and 3 for usb3, it is available only with upstream qemu.
-Due to implementation limitations this is not compatible with the usb
-and usbdevice parameters.
-Default is 0 (no usb controller defined).
-
-=item B<usbdevice=[ "DEVICE", "DEVICE", ...]>
-
-Adds B<DEVICE>s to the emulated USB bus. The USB bus must also be
-enabled using B<usb=1>. The most common use for this option is
-B<usbdevice=['tablet']> which adds pointer device using absolute
-coordinates. Such devices function better than relative coordinate
-devices (such as a standard mouse) since many methods of exporting
-guest graphics (such as VNC) work better in this mode. Note that this
-is independent of the actual pointer device you are using on the
-host/client side.
-
-Host devices can also be passed through in this way, by specifying
-host:USBID, where USBID is of the form xxxx:yyyy.  The USBID can
-typically be found by using lsusb or usb-devices.
-
-If you wish to use the "host:bus.addr" format, remove any leading '0' from the
-bus and addr. For example, for the USB device on bus 008 dev 002, you should
-write "host:8.2".
-
-The form usbdevice=DEVICE is also accepted for backwards compatibility.
-
-More valid options can be found in the "usbdevice" section of the qemu
-documentation.
-
-=item B<vendor_device="VENDOR_DEVICE">
-
-Selects which variant of the QEMU xen-pvdevice should be used for this
-guest. Valid values are:
-
-=over 4
-
-=item B<none>
-
-The xen-pvdevice should be omitted. This is the default.
-
-=item B<xenserver>
-
-The xenserver variant of the xen-pvdevice (device-id=C000) will be
-specified, enabling the use of XenServer PV drivers in the guest.
-
-=back
-
-This parameter only takes effect when device_model_version=qemu-xen.
-See F<docs/misc/pci-device-reservations.txt> for more information.
-
-=back
-
-=head2 Device-Model Options
-
-The following options control the selection of the device-model.  This
-is the component which provides emulation of the virtual devices to an
-HVM guest.  For a PV guest a device-model is sometimes used to provide
-backends for certain PV devices (most usually a virtual framebuffer
-device).
-
-=over 4
-
-=item B<device_model_version="DEVICE-MODEL">
-
-Selects which variant of the device-model should be used for this
-guest. Valid values are:
-
-=over 4
-
-=item B<qemu-xen>
-
-Use the device-model merged into the upstream QEMU project.
-This device-model is the default for Linux dom0.
-
-=item B<qemu-xen-traditional>
-
-Use the device-model based upon the historical Xen fork of Qemu.
-This device-model is still the default for NetBSD dom0.
-
-=item B<none>
-
-Don't use any device model. This requires a kernel capable of booting
-without emulated devices.
-
-=back
-
-It is recommended to accept the default value for new guests.  If
-you have existing guests then, depending on the nature of the guest
-Operating System, you may wish to force them to use the device
-model which they were installed with.
-
-=item B<device_model_override="PATH">
-
-Override the path to the binary to be used as the device-model. The
-binary provided here MUST be consistent with the
-`device_model_version` which you have specified. You should not
-normally need to specify this option.
-
-=item B<device_model_stubdomain_override=BOOLEAN>
-
-Override the use of stubdomain based device-model.  Normally this will
-be automatically selected based upon the other features and options
-you have selected.
-
-=item B<device_model_stubdomain_seclabel="LABEL">
-
-Assign an XSM security label to the device-model stubdomain.
-
-=item B<device_model_args=[ "ARG", "ARG", ...]>
-
-Pass additional arbitrary options on the device-model command
-line. Each element in the list is passed as an option to the
-device-model.
-
-=item B<device_model_args_pv=[ "ARG", "ARG", ...]>
-
-Pass additional arbitrary options on the device-model command line for
-a PV device model only. Each element in the list is passed as an
-option to the device-model.
-
-=item B<device_model_args_hvm=[ "ARG", "ARG", ...]>
-
-Pass additional arbitrary options on the device-model command line for
-an HVM device model only. Each element in the list is passed as an
-option to the device-model.
-
-=back
-
-=head2 Keymaps
-
-The keymaps available are defined by the device-model which you are
-using. Commonly this includes:
-
-        ar  de-ch  es  fo     fr-ca  hu  ja  mk     no  pt-br  sv
-        da  en-gb  et  fr     fr-ch  is  lt  nl     pl  ru     th
-        de  en-us  fi  fr-be  hr     it  lv  nl-be  pt  sl     tr
-
-The default is B<en-us>.
-
-See L<qemu(1)> for more information.
-
-=head2 Architecture Specific options
-
-=head3 ARM
-
-=over 4
-
-=item B<gic_version="vN">
-
-Version of the GIC emulated for the guest. Currently, the following
-versions are supported:
-
-=over 4
-
-=item B<v2>
-
-Emulate a GICv2
-
-=item B<v3>
-
-Emulate a GICv3. Note that the emulated GIC does not support the
-GICv2 compatibility mode.
-
-=item B<default>
-
-Emulate the same version as the native GIC hardware used by host where
-the domain was created.
-
-=back
-
-This requires hardware compatibility with the requested version. Either
-natively or via hardware backwards compatibility support.
-
-=back
-
-=head1 SEE ALSO
-
-=over 4
-
-=item L<xl(1)>
-
-=item L<xlcpupool.cfg(5)>
-
-=item F<xl-disk-configuration>
-
-=item F<xl-network-configuration>
-
-=item F<docs/misc/tscmode.txt>
-
-=back
-
-=head1 FILES
-
-F</etc/xen/NAME.cfg>
-F</var/lib/xen/dump/NAME>
-
-=head1 BUGS
-
-This document may contain items which require further
-documentation. Patches to improve incomplete items (or any other item)
-are gratefully received on the xen-devel@xxxxxxxxxxxxx mailing
-list. Please see L<http://wiki.xen.org/wiki/SubmittingXenPatches> for
-information on how to submit a patch to Xen.
-
diff --git a/docs/man/xl.cfg.pod.5.in b/docs/man/xl.cfg.pod.5.in
new file mode 100644
index 0000000..3bb27d0
--- /dev/null
+++ b/docs/man/xl.cfg.pod.5.in
@@ -0,0 +1,2029 @@
+=head1 NAME
+
+xl.cfg - XL Domain Configuration File Syntax
+
+=head1 SYNOPSIS
+
+ /etc/xen/xldomain
+
+=head1 DESCRIPTION
+
+To create a VM (a domain in Xen terminology, sometimes called a guest)
+with xl requires the provision of a domain config file.  Typically
+these live in `/etc/xen/DOMAIN.cfg` where DOMAIN is the name of the
+domain.
+
+=head1 SYNTAX
+
+A domain config file consists of a series of C<KEY=VALUE> pairs.
+
+Some C<KEY>s are mandatory, others are general options which apply to
+any guest type while others relate only to specific guest types
+(e.g. PV or HVM guests).
+
+A value C<VALUE> is one of:
+
+=over 4
+
+=item B<"STRING">
+
+A string, surrounded by either single or double quotes.
+
+=item B<NUMBER>
+
+A number, in either decimal, octal (using a C<0> prefix) or
+hexadecimal (using an C<0x> prefix).
+
+=item B<BOOLEAN>
+
+A C<NUMBER> interpreted as C<False> (C<0>) or C<True> (any other
+value).
+
+=item B<[ VALUE, VALUE, ... ]>
+
+A list of C<VALUES> of the above types. Lists can be heterogeneous and
+nested.
+
+=back
+
+The semantics of each C<KEY> defines which form of C<VALUE> is required.
+
+Pairs may be separated either by a newline or a semicolon.  Both
+of the following are valid:
+
+  name="h0"
+  builder="hvm"
+
+  name="h0"; builder="hvm"
+
+=head1 OPTIONS
+
+=head2 Mandatory Configuration Items
+
+The following key is mandatory for any guest type:
+
+=over 4
+
+=item B<name="NAME">
+
+Specifies the name of the domain.  Names of domains existing on a
+single host must be unique.
+
+=back
+
+=head2 Selecting Guest Type
+
+=over 4
+
+=item B<builder="generic">
+
+Specifies that this is to be a PV domain. This is the default.
+
+=item B<builder="hvm">
+
+Specifies that this is to be an HVM domain.  That is, a fully
+virtualised computer with emulated BIOS, disk and network peripherals,
+etc.  The default is a PV domain, suitable for hosting Xen-aware guest
+operating systems.
+
+=back
+
+=head2 General Options
+
+The following options apply to guests of any type.
+
+=head3 CPU Allocation
+
+=over 4
+
+=item B<pool="CPUPOOLNAME">
+
+Put the guest's vcpus into the named cpu pool.
+
+=item B<vcpus=N>
+
+Start the guest with N vcpus initially online.
+
+=item B<maxvcpus=M>
+
+Allow the guest to bring up a maximum of M vcpus. At start of day if
+`vcpus=N` is less than `maxvcpus=M` then the first `N` vcpus will be
+created online and the remainder will be offline.
+
+=item B<cpus="CPU-LIST">
+
+List of which cpus the guest is allowed to use. Default is no pinning at
+all (more on this below). A C<CPU-LIST> may be specified as follows:
+
+=over 4
+
+=item "all"
+
+To allow all the vcpus of the guest to run on all the cpus on the host.
+
+=item "0-3,5,^1"
+
+To allow all the vcpus of the guest to run on cpus 0,2,3,5. Combining
+this with "all" is possible, meaning "all,^7" results in all the vcpus
+of the guest running on all the cpus on the host except cpu 7.
+
+=item "nodes:0-3,node:^2"
+
+To allow all the vcpus of the guest to run on the cpus from NUMA nodes
+0,1,3 of the host. So, if cpus 0-3 belongs to node 0, cpus 4-7 belongs
+to node 1 and cpus 8-11 to node 3, the above would mean all the vcpus
+of the guest will run on cpus 0-3,8-11.
+
+Combining this notation with the one above is possible. For instance,
+"1,node:2,^6", means all the vcpus of the guest will run on cpu 1 and
+on all the cpus of NUMA node 2, but not on cpu 6. Following the same
+example as above, that would be cpus 1,4,5,7.
+
+Combining this with "all" is also possible, meaning "all,^nodes:1"
+results in all the vcpus of the guest running on all the cpus on the
+host, except for the cpus belonging to the host NUMA node 1.
+
+=item ["2", "3-8,^5"]
+
+To ask for specific vcpu mapping. That means (in this example), vcpu 0
+of the guest will run on cpu 2 of the host and vcpu 1 of the guest will
+run on cpus 3,4,6,7,8 of the host.
+
+More complex notation can be also used, exactly as described above. So
+"all,^5-8", or just "all", or "node:0,node:2,^9-11,18-20" are all legal,
+for each element of the list.
+
+=back
+
+If this option is not specified, no vcpu to cpu pinning is established,
+and the vcpus of the guest can run on all the cpus of the host. If this
+option is specified, the intersection of the vcpu pinning mask, provided
+here, and the soft affinity mask, provided via B<cpus_soft=> (if any),
+is utilized to compute the domain node-affinity, for driving memory
+allocations.
+
+=item B<cpus_soft="CPU-LIST">
+
+Exactly as B<cpus=>, but specifies soft affinity, rather than pinning
+(hard affinity). When using the credit scheduler, this means what cpus
+the vcpus of the domain prefer.
+
+A C<CPU-LIST> is specified exactly as above, for B<cpus=>.
+
+If this option is not specified, the vcpus of the guest will not have
+any preference regarding on what cpu to run. If this option is specified,
+the intersection of the soft affinity mask, provided here, and the vcpu
+pinning, provided via B<cpus=> (if any), is utilized to compute the
+domain node-affinity, for driving memory allocations.
+
+If this option is not specified (and B<cpus=> is not specified either),
+libxl automatically tries to place the guest on the least possible
+number of nodes. A heuristic approach is used for choosing the best
+node (or set of nodes), with the goal of maximizing performance for
+the guest and, at the same time, achieving efficient utilization of
+host cpus and memory. In that case, the soft affinity of all the vcpus
+of the domain will be set to the pcpus belonging to the NUMA nodes
+chosen during placement.
+
+For more details, see F<docs/misc/xl-numa-placement.markdown>.
+
+=back
+
+=head3 CPU Scheduling
+
+=over 4
+
+=item B<cpu_weight=WEIGHT>
+
+A domain with a weight of 512 will get twice as much CPU as a domain
+with a weight of 256 on a contended host.
+Legal weights range from 1 to 65535 and the default is 256.
+Honoured by the credit and credit2 schedulers.
+
+=item B<cap=N>
+
+The cap optionally fixes the maximum amount of CPU a domain will be
+able to consume, even if the host system has idle CPU cycles.
+The cap is expressed in percentage of one physical CPU:
+100 is 1 physical CPU, 50 is half a CPU, 400 is 4 CPUs, etc.
+The default, 0, means there is no upper cap.
+Honoured by the credit and credit2 schedulers.
+
+NB: Many systems have features that will scale down the computing
+power of a cpu that is not 100% utilized.  This can be in the
+operating system, but can also sometimes be below the operating system
+in the BIOS.  If you set a cap such that individual cores are running
+at less than 100%, this may have an impact on the performance of your
+workload over and above the impact of the cap. For example, if your
+processor runs at 2GHz, and you cap a vm at 50%, the power management
+system may also reduce the clock speed to 1GHz; the effect will be
+that your VM gets 25% of the available power (50% of 1GHz) rather than
+50% (50% of 2GHz).  If you are not getting the performance you expect,
+look at performance and cpufreq options in your operating system and
+your BIOS.
+
+=back
+
+=head3 Memory Allocation
+
+=over 4
+
+=item B<memory=MBYTES>
+
+Start the guest with MBYTES megabytes of RAM.
+
+=item B<maxmem=MBYTES>
+
+Specifies the maximum amount of memory a guest can ever see.
+The value of B<maxmem=> must be equal or greater than B<memory=>.
+
+In combination with B<memory=> it will start the guest "pre-ballooned",
+if the values of B<memory=> and B<maxmem=> differ.
+A "pre-ballooned" HVM guest needs a balloon driver, without a balloon driver
+it will crash.
+
+NOTE: Because of the way ballooning works, the guest has to allocate
+memory to keep track of maxmem pages, regardless of how much memory it
+actually has available to it.  A guest with maxmem=262144 and
+memory=8096 will report significantly less memory available for use
+than a system with maxmem=8096 memory=8096 due to the memory overhead
+of having to track the unused pages.
+
+=back
+
+=head3 Guest Virtual NUMA Configuration
+
+=over 4
+
+=item B<vnuma=[ VNODE_SPEC, VNODE_SPEC, ... ]>
+
+Specify virtual NUMA configuration with positional arguments. The
+nth B<VNODE_SPEC> in the list specifies the configuration of nth
+virtual node.
+
+Note that virtual NUMA for PV guest is not yet supported, because
+there is an issue with cpuid handling that affects PV virtual NUMA.
+Furthermore, guests with virtual NUMA cannot be saved or migrated
+because the migration stream does not preserve node information.
+
+Each B<VNODE_SPEC> is a list, which has a form of
+"[VNODE_CONFIG_OPTION,VNODE_CONFIG_OPTION, ... ]"  (without quotes).
+
+For example vnuma = [ ["pnode=0","size=512","vcpus=0-4","vdistances=10,20"] ]
+means vnode 0 is mapped to pnode 0, has 512MB ram, has vcpus 0 to 4, the
+distance to itself is 10 and the distance to vnode 1 is 20.
+
+Each B<VNODE_CONFIG_OPTION> is a quoted key=value pair. Supported
+B<VNODE_CONFIG_OPTION>s are (they are all mandatory at the moment):
+
+=over 4
+
+=item B<pnode=NUMBER>
+
+Specify which physical node this virtual node maps to.
+
+=item B<size=MBYTES>
+
+Specify the size of this virtual node. The sum of memory size of all
+vnodes will become B<maxmem=>. If B<maxmem=> is specified separately,
+a check is performed to make sure the sum of all vnode memory matches
+B<maxmem=>.
+
+=item B<vcpus=CPU-STRING>
+
+Specify which vcpus belong to this node. B<CPU-STRING> is a string
+separated by comma. You can specify range and single cpu. An example
+is "vcpus=0-5,8", which means you specify vcpu 0 to vcpu 5, and vcpu
+8.
+
+=item B<vdistances=NUMBER, NUMBER, ... >
+
+Specify virtual distance from this node to all nodes (including
+itself) with positional arguments. For example, "vdistance=10,20"
+for vnode 0 means the distance from vnode 0 to vnode 0 is 10, from
+vnode 0 to vnode 1 is 20. The number of arguments supplied must match
+the total number of vnodes.
+
+Normally you can use the values from "xl info -n" or "numactl
+--hardware" to fill in vdistance list.
+
+=back
+
+=back
+
+=head3 Event Actions
+
+=over 4
+
+=item B<on_poweroff="ACTION">
+
+Specifies what should be done with the domain if it shuts itself down.
+The C<ACTION>s are:
+
+=over 4
+
+=item B<destroy>
+
+destroy the domain
+
+=item B<restart>
+
+destroy the domain and immediately create a new domain with the same
+configuration
+
+=item B<rename-restart>
+
+rename the domain which terminated, and then immediately create a new
+domain with the same configuration as the original
+
+=item B<preserve>
+
+keep the domain.  It can be examined, and later destroyed with `xl
+destroy`.
+
+=item B<coredump-destroy>
+
+write a "coredump" of the domain to F<@XEN_DUMP_DIR@/NAME> and then
+destroy the domain.
+
+=item B<coredump-restart>
+
+write a "coredump" of the domain to F<@XEN_DUMP_DIR@/NAME> and then
+restart the domain.
+
+=item B<soft-reset>
+
+Reset all Xen specific interfaces for the Xen-aware HVM domain allowing
+it to reestablish these interfaces and continue executing the domain. PV
+and non-Xen-aware HVM guests are not supported.
+
+=back
+
+The default for C<on_poweroff> is C<destroy>.
+
+=item B<on_reboot="ACTION">
+
+Action to take if the domain shuts down with a reason code requesting
+a reboot.  Default is C<restart>.
+
+=item B<on_watchdog="ACTION">
+
+Action to take if the domain shuts down due to a Xen watchdog timeout.
+Default is C<destroy>.
+
+=item B<on_crash="ACTION">
+
+Action to take if the domain crashes.  Default is C<destroy>.
+
+=item B<on_soft_reset="ACTION">
+
+Action to take if the domain performs 'soft reset' (e.g. does kexec).
+Default is C<soft-reset>.
+
+=back
+
+=head3 Direct Kernel Boot
+
+Direct kernel boot allows booting directly from a kernel and initrd
+stored in the host physical machine OS, allowing command line arguments
+to be passed directly. PV guest direct kernel boot is supported. HVM
+guest direct kernel boot is supported with limitation (it's supported
+when using qemu-xen and default BIOS 'seabios'; not supported in case of
+stubdom-dm and old rombios.)
+
+=over 4
+
+=item B<kernel="PATHNAME">
+
+Load the specified file as the kernel image.
+
+=item B<ramdisk="PATHNAME">
+
+Load the specified file as the ramdisk.
+
+=item B<cmdline="STRING">
+
+Append B<cmdline="STRING"> to the kernel command line. (Note: it is
+guest specific what meaning this has). It can replace B<root="STRING">
+plus B<extra="STRING"> and is preferred. When B<cmdline="STRING"> is set,
+B<root="STRING"> and B<extra="STRING"> will be ignored.
+
+=item B<root="STRING">
+
+Append B<root="STRING"> to the kernel command line (Note: it is guest
+specific what meaning this has).
+
+=item B<extra="STRING">
+
+Append B<STRING> to the kernel command line. (Note: it is guest
+specific what meaning this has).
+
+=back
+
+=head3 Other Options
+
+=over 4
+
+=item B<uuid="UUID">
+
+Specifies the UUID of the domain.  If not specified, a fresh unique
+UUID will be generated.
+
+=item B<seclabel="LABEL">
+
+Assign an XSM security label to this domain.
+
+=item B<init_seclabel="LABEL">
+
+Specify an XSM security label used for this domain temporarily during
+its build. The domain's XSM label will be changed to the execution
+seclabel (specified by "seclabel") once the build is complete, prior to
+unpausing the domain. With a properly constructed security policy (such
+as nomigrate_t in the example policy), this can be used to build a
+domain whose memory is not accessible to the toolstack domain.
+
+=item B<nomigrate=BOOLEAN>
+
+Disable migration of this domain.  This enables certain other features
+which are incompatible with migration. Currently this is limited to
+enabling the invariant TSC feature flag in cpuid results when TSC is
+not emulated.
+
+=item B<driver_domain=BOOLEAN>
+
+Specify that this domain is a driver domain. This enables certain
+features needed in order to run a driver domain.
+
+=item B<device_tree=PATH>
+
+Specify a partial device tree (compiled via the Device Tree Compiler).
+Everything under the node "/passthrough" will be copied into the guest
+device tree. For convenience, the node "/aliases" is also copied to allow
+the user to define aliases which can be used by the guest kernel.
+
+Given the complexity of verifying the validity of a device tree, this
+option should only be used with a trusted device tree.
+
+Note that the partial device tree should avoid to use the phandle 65000
+which is reserved by the toolstack.
+
+=back
+
+=head2 Devices
+
+The following options define the paravirtual, emulated and physical
+devices which the guest will contain.
+
+=over 4
+
+=item B<disk=[ "DISK_SPEC_STRING", "DISK_SPEC_STRING", ...]>
+
+Specifies the disks (both emulated disks and Xen virtual block
+devices) which are to be provided to the guest, and what objects on
+the host they should map to.  See F<docs/misc/xl-disk-configuration.txt>.
+
+=item B<vif=[ "NET_SPEC_STRING", "NET_SPEC_STRING", ...]>
+
+Specifies the networking provision (both emulated network adapters,
+and Xen virtual interfaces) to be provided to the guest.  See
+F<docs/misc/xl-network-configuration.markdown>.
+
+=item B<vtpm=[ "VTPM_SPEC_STRING", "VTPM_SPEC_STRING", ...]>
+
+Specifies the virtual trusted platform module to be
+provided to the guest. Please see F<docs/misc/vtpm.txt>
+for more details.
+
+Each B<VTPM_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
+settings, from the following list:
+
+=over 4
+
+=item C<backend=DOMAIN>
+
+Specify the backend domain name or id. This value is required!
+If this domain is a guest, the backend should be set to the
+vtpm domain name. If this domain is a vtpm, the
+backend should be set to the vtpm manager domain name.
+
+=item C<uuid=UUID>
+
+Specify the uuid of this vtpm device. The uuid is used to uniquely
+identify the vtpm device. You can create one using the uuidgen
+program on unix systems. If left unspecified, a new uuid
+will be randomly generated every time the domain boots.
+If this is a vtpm domain, you should specify a value. The
+value is optional if this is a guest domain.
+
+=back
+
+=item B<vfb=[ "VFB_SPEC_STRING", "VFB_SPEC_STRING", ...]>
+
+Specifies the paravirtual framebuffer devices which should be supplied
+to the domain.
+
+This option does not control the emulated graphics card presented to
+an HVM guest. See L<Emulated VGA Graphics Device> below for how to
+configure the emulated device. If L<Emulated VGA Graphics Device> options
+are used in a PV guest configuration, xl will pick up B<vnc>, B<vnclisten>,
+B<vncpasswd>, B<vncdisplay>, B<vncunused>, B<sdl>, B<opengl> and
+B<keymap> to construct paravirtual framebuffer device for the guest.
+
+Each B<VFB_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
+settings, from the following list:
+
+=over 4
+
+=item C<vnc=BOOLEAN>
+
+Allow access to the display via the VNC protocol.  This enables the
+other VNC-related settings.  The default is to enable this.
+
+=item C<vnclisten="ADDRESS[:DISPLAYNUM]">
+
+Specifies the IP address, and optionally VNC display number, to use.
+
+NB that if you specify the display number here, you should not use
+vncdisplay.
+
+=item C<vncdisplay=DISPLAYNUM>
+
+Specifies the VNC display number to use.  The actual TCP port number
+will be DISPLAYNUM+5900.
+
+NB that you should not use this option if you set the displaynum in the
+vnclisten string.
+
+=item C<vncunused=BOOLEAN>
+
+Requests that the VNC display setup search for a free TCP port to use.
+The actual display used can be accessed with C<xl vncviewer>.
+
+=item C<vncpasswd="PASSWORD">
+
+Specifies the password for the VNC server.
+
+=item C<sdl=BOOLEAN>
+
+Specifies that the display should be presented via an X window (using
+Simple DirectMedia Layer). The default is to not enable this mode.
+
+=item C<display=DISPLAY>
+
+Specifies the X Window display that should be used when the sdl option
+is used.
+
+=item C<xauthority=XAUTHORITY>
+
+Specifies the path to the X authority file that should be used to
+connect to the X server when the sdl option is used.
+
+=item C<opengl=BOOLEAN>
+
+Enable OpenGL acceleration of the SDL display. Only affects machines
+using C<device_model_version="qemu-xen-traditional"> and only if the
+device-model was compiled with OpenGL support. Disabled by default.
+
+=item C<keymap="LANG">
+
+Configure the keymap to use for the keyboard associated with this
+display. If the input method does not easily support raw keycodes
+(e.g. this is often the case when using VNC) then this allows us to
+correctly map the input keys into keycodes seen by the guest. The
+specific values which are accepted are defined by the version of the
+device-model which you are using. See L</"Keymaps"> below or consult the
+L<qemu(1)> manpage. The default is B<en-us>.
+
+=back
+
+=item B<channel=[ "CHANNEL_SPEC_STRING", "CHANNEL_SPEC_STRING", ...]>
+
+Specifies the virtual channels to be provided to the guest. A
+channel is a low-bandwidth, bidirectional byte stream, which resembles
+a serial link. Typical uses for channels include transmitting VM
+configuration after boot and signalling to in-guest agents. Please see
+F<docs/misc/channels.txt> for more details.
+
+Each B<CHANNEL_SPEC_STRING> is a comma-separated list of C<KEY=VALUE>
+settings. Leading and trailing whitespace is ignored in both KEY and
+VALUE. Neither KEY nor VALUE may contain ',', '=' or '"'. Defined values
+are:
+
+=over 4
+
+=item C<backend=DOMAIN>
+
+Specify the backend domain name or id. This parameter is optional. If
+this parameter is omitted then the toolstack domain will be assumed.
+
+=item C<name=NAME>
+
+Specify the string name for this device. This parameter is mandatory.
+This should be a well-known name for the specific application (e.g.
+guest agent) and should be used by the frontend to connect the
+application to the right channel device. There is no formal registry
+of channel names, so application authors are encouraged to make their
+names unique by including domain name and version number in the string
+(e.g. org.mydomain.guestagent.1).
+
+=item C<connection=CONNECTION>
+
+Specify how the backend will be implemented. The following options are
+available:
+
+=over 4
+
+=item B<connection=SOCKET>
+
+The backend will bind a Unix domain socket (at the path given by
+B<path=PATH>), call listen and accept connections. The backend will proxy
+data between the channel and the connected socket.
+
+=item B<connection=PTY>
+
+The backend will create a pty and proxy data between the channel and the
+master device. The command B<xl channel-list> can be used to discover the
+assigned slave device.
+
+=back
+
+=back
+
+=item B<rdm="RDM_RESERVATION_STRING">
+
+(HVM/x86 only) Specifies information about Reserved Device Memory (RDM),
+which is necessary to enable robust device passthrough. One example of RDM
+is reported through ACPI Reserved Memory Region Reporting (RMRR) structure
+on x86 platform.
+
+B<RDM_RESERVATION_STRING> has the form C<[KEY=VALUE,KEY=VALUE,...]> where:
+
+=over 4
+

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.