
[Xen-changelog] [linux-2.6.18-xen] sync Xen public headers


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-linux-2.6.18-xen <patchbot@xxxxxxx>
  • Date: Tue, 18 Dec 2012 09:55:04 +0000
  • Delivery-date: Tue, 18 Dec 2012 09:55:18 +0000
  • List-id: "Change log for Mercurial (receive only)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich
# Date 1355823740 -3600
# Node ID 302f7c9423c9ab30f0cad07f334db90c05796f7d
# Parent  864f8414c071d33b4fdc51229055783df1619cf5
sync Xen public headers

(again without dropping ia64 bits)
---


diff -r 864f8414c071 -r 302f7c9423c9 drivers/xen/scsiback/scsiback.c
--- a/drivers/xen/scsiback/scsiback.c   Tue Dec 18 10:15:28 2012 +0100
+++ b/drivers/xen/scsiback/scsiback.c   Tue Dec 18 10:42:20 2012 +0100
@@ -52,8 +52,8 @@ struct list_head pending_free;
 DEFINE_SPINLOCK(pending_free_lock);
 DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
 
-int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
-module_param_named(reqs, vscsiif_reqs, int, 0);
+static unsigned int vscsiif_reqs = 128;
+module_param_named(reqs, vscsiif_reqs, uint, 0);
 MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
 
 static unsigned int log_print_stat = 0;
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/arch-arm.h
--- a/include/xen/interface/arch-arm.h  Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/arch-arm.h  Tue Dec 18 10:42:20 2012 +0100
@@ -51,18 +51,36 @@
 
 #define XEN_HYPERCALL_TAG   0XEA1
 
+#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
 
 #ifndef __ASSEMBLY__
-#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
-    typedef struct { type *p; } __guest_handle_ ## name
+#define ___DEFINE_XEN_GUEST_HANDLE(name, type)                  \
+    typedef union { type *p; unsigned long q; }                 \
+        __guest_handle_ ## name;                                \
+    typedef union { type *p; uint64_aligned_t q; }              \
+        __guest_handle_64_ ## name;
 
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory. On ARM it is always 8 bytes in size and
+ * 8-byte aligned.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument. It is 4 bytes on aarch32 and 8 bytes on aarch64.
+ */
 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
     ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
     ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
 #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
-#define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
+#define __XEN_GUEST_HANDLE(name)        __guest_handle_64_ ## name
 #define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
-#define set_xen_guest_handle_raw(hnd, val)  do { (hnd).p = val; } while (0)
+/* this is going to be changed on 64 bit */
+#define XEN_GUEST_HANDLE_PARAM(name)    __guest_handle_ ## name
+#define set_xen_guest_handle_raw(hnd, val)                  \
+    do {                                                    \
+        typeof(&(hnd)) _sxghr_tmp = &(hnd);                 \
+        _sxghr_tmp->q = 0;                                  \
+        _sxghr_tmp->p = val;                                \
+    } while ( 0 )
 #ifdef __XEN_TOOLS__
 #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
 #endif
@@ -122,9 +140,14 @@ typedef uint64_t xen_pfn_t;
 /* Only one. All other VCPUS must use VCPUOP_register_vcpu_info */
 #define XEN_LEGACY_MAX_VCPUS 1
 
-typedef uint32_t xen_ulong_t;
+typedef uint64_t xen_ulong_t;
+#define PRI_xen_ulong PRIx64
 
 struct vcpu_guest_context {
+#define _VGCF_online                   0
+#define VGCF_online                    (1<<_VGCF_online)
+    uint32_t flags;                         /* VGCF_* */
+
     struct cpu_user_regs user_regs;         /* User-level CPU registers     */
 
     uint32_t sctlr;
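
The set_xen_guest_handle_raw() rework above follows from the in-memory ARM
handle now being a 64-bit union: zeroing the whole union before storing the
(possibly 32-bit) pointer keeps the upper bytes well defined. A minimal,
self-contained sketch of that pattern; the typedef and payload names here are
illustrative, not the header's:

#include <stdint.h>
#include <stdio.h>

#define uint64_aligned_t uint64_t __attribute__((aligned(8)))

/* 64-bit in-memory handle, as in the header: pointer overlaid on a u64. */
typedef union { int *p; uint64_aligned_t q; } my_guest_handle_64_int;

#define set_handle_raw(hnd, val)                            \
    do {                                                    \
        typeof(&(hnd)) _tmp = &(hnd);                       \
        _tmp->q = 0;              /* clear all 8 bytes   */ \
        _tmp->p = val;            /* then store pointer  */ \
    } while ( 0 )

int main(void)
{
    static int payload = 42;
    my_guest_handle_64_int hnd;

    set_handle_raw(hnd, &payload);
    /* The upper 32 bits are guaranteed zero even on a 32-bit guest. */
    printf("q = %#llx\n", (unsigned long long)hnd.q);
    return 0;
}
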
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/arch-x86/xen.h
--- a/include/xen/interface/arch-x86/xen.h      Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/arch-x86/xen.h      Tue Dec 18 10:42:20 2012 +0100
@@ -38,12 +38,21 @@
     typedef type * __guest_handle_ ## name
 #endif
 
+/*
+ * XEN_GUEST_HANDLE represents a guest pointer, when passed as a field
+ * in a struct in memory.
+ * XEN_GUEST_HANDLE_PARAM represents a guest pointer, when passed as a
+ * hypercall argument.
+ * XEN_GUEST_HANDLE_PARAM and XEN_GUEST_HANDLE are the same on X86 but
+ * they might not be on other architectures.
+ */
 #define __DEFINE_XEN_GUEST_HANDLE(name, type) \
     ___DEFINE_XEN_GUEST_HANDLE(name, type);   \
     ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
 #define DEFINE_XEN_GUEST_HANDLE(name)   __DEFINE_XEN_GUEST_HANDLE(name, name)
 #define __XEN_GUEST_HANDLE(name)        __guest_handle_ ## name
 #define XEN_GUEST_HANDLE(name)          __XEN_GUEST_HANDLE(name)
+#define XEN_GUEST_HANDLE_PARAM(name)    XEN_GUEST_HANDLE(name)
 #define set_xen_guest_handle_raw(hnd, val)  do { (hnd).p = val; } while (0)
 #ifdef __XEN_TOOLS__
 #define get_xen_guest_handle(val, hnd)  do { val = (hnd).p; } while (0)
@@ -62,7 +71,7 @@ typedef unsigned long xen_pfn_t;
 #endif
 
 /*
- * SEGMENT DESCRIPTOR TABLES
+ * `incontents 200 segdesc Segment Descriptor Tables
  */
 /*
  * ` enum neg_errnoval
@@ -74,17 +83,31 @@ typedef unsigned long xen_pfn_t;
  * start of the GDT because some stupid OSes export hard-coded selector values
  * in their ABI. These hard-coded values are always near the start of the GDT,
  * so Xen places itself out of the way, at the far end of the GDT.
+ *
+ * NB The LDT is set using the MMUEXT_SET_LDT op of HYPERVISOR_mmuext_op
  */
 #define FIRST_RESERVED_GDT_PAGE  14
 #define FIRST_RESERVED_GDT_BYTE  (FIRST_RESERVED_GDT_PAGE * 4096)
 #define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
 
+
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_descriptor(u64 pa, u64 desc);
+ * `
+ * ` @pa   The machine physical address of the descriptor to
+ * `       update. Must be either a descriptor page or writable.
+ * ` @desc The descriptor value to update, in the same format as a
+ * `       native descriptor table entry.
+ */
+
 /* Maximum number of virtual CPUs in legacy multi-processor guests. */
 #define XEN_LEGACY_MAX_VCPUS 32
 
 #ifndef __ASSEMBLY__
 
 typedef unsigned long xen_ulong_t;
+#define PRI_xen_ulong "lx"
 
 /*
  * ` enum neg_errnoval
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/domctl.h
--- a/include/xen/interface/domctl.h    Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/domctl.h    Tue Dec 18 10:42:20 2012 +0100
@@ -32,9 +32,9 @@
 #error "domctl operations are intended for use by node control tools only"
 #endif
 
-#include <xen/hvm/save.h>
 #include "xen.h"
 #include "grant_table.h"
+#include "hvm/save.h"
 
 #define XEN_DOMCTL_INTERFACE_VERSION 0x00000008
 
@@ -136,7 +136,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getme
 #define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
 #define XEN_DOMCTL_PFINFO_XTAB    (0xfU<<28) /* invalid page */
 #define XEN_DOMCTL_PFINFO_XALLOC  (0xeU<<28) /* allocate-only page */
-#define XEN_DOMCTL_PFINFO_PAGEDTAB (0x8U<<28)
+#define XEN_DOMCTL_PFINFO_BROKEN  (0xdU<<28) /* broken page */
 #define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
 
 struct xen_domctl_getpageframeinfo {
@@ -857,6 +857,12 @@ struct xen_domctl_set_access_required {
 typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
 
+struct xen_domctl_set_broken_page_p2m {
+    uint64_aligned_t pfn;
+};
+typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
+
 struct xen_domctl {
     uint32_t cmd;
 #define XEN_DOMCTL_createdomain                   1
@@ -922,6 +928,7 @@ struct xen_domctl {
 #define XEN_DOMCTL_set_access_required           64
 #define XEN_DOMCTL_audit_p2m                     65
 #define XEN_DOMCTL_set_virq_handler              66
+#define XEN_DOMCTL_set_broken_page_p2m           67
 #define XEN_DOMCTL_gdbsx_guestmemio            1000
 #define XEN_DOMCTL_gdbsx_pausevcpu             1001
 #define XEN_DOMCTL_gdbsx_unpausevcpu           1002
@@ -978,6 +985,7 @@ struct xen_domctl {
         struct xen_domctl_audit_p2m         audit_p2m;
         struct xen_domctl_set_virq_handler  set_virq_handler;
         struct xen_domctl_gdbsx_memio       gdbsx_guest_memio;
+        struct xen_domctl_set_broken_page_p2m set_broken_page_p2m;
         struct xen_domctl_gdbsx_pauseunp_vcpu gdbsx_pauseunp_vcpu;
         struct xen_domctl_gdbsx_domstatus   gdbsx_domstatus;
         uint8_t                             pad[128];
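
The new XEN_DOMCTL_set_broken_page_p2m subop pairs the pfn payload above with
command number 67. A rough toolstack-side sketch of issuing it; issue_domctl()
is a hypothetical stand-in for whatever wrapper actually performs the domctl
hypercall, and domid/pfn come from the caller:

/* Mark a guest pfn as broken in its p2m (sketch, not real toolstack code). */
static int mark_page_broken(domid_t domid, uint64_t pfn)
{
    struct xen_domctl domctl = {
        .cmd               = XEN_DOMCTL_set_broken_page_p2m,
        .interface_version = XEN_DOMCTL_INTERFACE_VERSION,
        .domain            = domid,
        .u.set_broken_page_p2m.pfn = pfn,
    };

    return issue_domctl(&domctl);   /* hypothetical wrapper */
}
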
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/grant_table.h
--- a/include/xen/interface/grant_table.h       Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/grant_table.h       Tue Dec 18 10:42:20 2012 +0100
@@ -385,7 +385,11 @@ struct gnttab_setup_table {
     uint32_t nr_frames;
     /* OUT parameters. */
     int16_t  status;              /* => enum grant_status */
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
     XEN_GUEST_HANDLE(ulong) frame_list;
+#else
+    XEN_GUEST_HANDLE(xen_pfn_t) frame_list;
+#endif
 };
 typedef struct gnttab_setup_table gnttab_setup_table_t;
 DEFINE_XEN_GUEST_HANDLE(gnttab_setup_table_t);
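
With __XEN_INTERFACE_VERSION__ at 0x00040300 or newer, frame_list is now a
handle to xen_pfn_t rather than to unsigned long (whose generic handle is
dropped from xen.h further down). A sketch of how a guest built against the
new headers might populate it; NR_GRANT_FRAMES and the frames[] buffer are
illustrative:

#define NR_GRANT_FRAMES 4

static xen_pfn_t frames[NR_GRANT_FRAMES];

static int setup_grant_table(void)
{
    struct gnttab_setup_table setup = {
        .dom       = DOMID_SELF,
        .nr_frames = NR_GRANT_FRAMES,
    };

    /* frame_list is XEN_GUEST_HANDLE(xen_pfn_t) at this interface version. */
    set_xen_guest_handle(setup.frame_list, frames);

    if (HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1))
        return -1;

    return setup.status;   /* GNTST_okay (0) on success */
}
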
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/io/blkif.h
--- a/include/xen/interface/io/blkif.h  Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/io/blkif.h  Tue Dec 18 10:42:20 2012 +0100
@@ -126,6 +126,34 @@
  *      of this type may still be returned at any time with the
  *      BLKIF_RSP_EOPNOTSUPP result code.
  *
+ * feature-persistent
+ *      Values:         0/1 (boolean)
+ *      Default Value:  0
+ *      Notes: 7
+ *
+ *      A value of "1" indicates that the backend can keep the grants used
+ *      by the frontend driver mapped, so the same set of grants should be
+ *      used in all transactions. The maximum number of grants the backend
+ *      can map persistently depends on the implementation, but ideally it
+ *      should be RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST. Using this
+ *      feature the backend doesn't need to unmap each grant, preventing
+ *      costly TLB flushes. The backend driver should only map grants
+ *      persistently if the frontend supports it. If a backend driver chooses
+ *      to use the persistent protocol when the frontend doesn't support it,
+ *      it will probably hit the maximum number of persistently mapped grants
+ *      (due to the fact that the frontend won't be reusing the same grants),
+ *      and fall back to non-persistent mode. Backend implementations may
+ *      shrink or expand the number of persistently mapped grants without
+ *      notifying the frontend depending on memory constraints (this might
+ *      cause a performance degradation).
+ *
+ *      If a backend driver wants to limit the maximum number of persistently
+ *      mapped grants to a value less than RING_SIZE *
+ *      BLKIF_MAX_SEGMENTS_PER_REQUEST, an LRU strategy should be used to
+ *      discard the grants that are less commonly used. Using an LRU in the
+ *      backend driver paired with a LIFO queue in the frontend gives
+ *      better performance in this scenario.
+ *
  *----------------------- Request Transport Parameters ------------------------
  *
  * max-ring-page-order
@@ -242,6 +270,27 @@
  *      The size of the frontend allocated request ring buffer in units of
  *      machine pages.  The value must be a power of 2.
  *
+ * feature-persistent
+ *      Values:         0/1 (boolean)
+ *      Default Value:  0
+ *      Notes: 7, 8, 9
+ *
+ *      A value of "1" indicates that the frontend will reuse the same grants
+ *      for all transactions, allowing the backend to map them with write
+ *      access (even when it should be read-only). If the frontend hits the
+ *      maximum number of allowed persistently mapped grants, it can fall back
+ *      to non-persistent mode. This will cause a performance degradation,
+ *      since the backend driver will still try to map those grants
+ *      persistently. Since the persistent grants protocol is compatible with
+ *      the previous protocol, a frontend driver can choose to work in
+ *      persistent mode even when the backend doesn't support it.
+ *
+ *      It is recommended that the frontend driver stores the persistently
+ *      mapped grants in a LIFO queue, so a subset of all persistently mapped
+ *      grants gets used commonly. This is done in case the backend driver
+ *      decides to limit the maximum number of persistently mapped grants
+ *      to a value less than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
+ *
  *------------------------- Virtual Device Properties -------------------------
  *
  * device-type
@@ -279,6 +328,17 @@
  *     'ring-ref' is used to communicate the grant reference for this
  *     page to the backend.  When using a multi-page ring, the 'ring-ref'
  *     node is not created.  Instead 'ring-ref0' - 'ring-refN' are used.
+ * (7) When using persistent grants, data has to be copied from/to the page
+ *     where the grant is currently mapped. The overhead of doing this copy,
+ *     however, doesn't cancel out the speed improvement of not having to
+ *     unmap the grants.
+ * (8) The frontend driver has to allow the backend driver to map all grants
+ *     with write access, even when they should be mapped read-only, since
+ *     further requests may reuse these grants and require write permissions.
+ * (9) The Linux implementation doesn't have a limit on the maximum number of
+ *     grants that can be persistently mapped in the frontend driver, but
+ *     due to the frontend driver implementation it should never be bigger
+ *     than RING_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST.
  */
 
 /*
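
The feature-persistent negotiation described above happens through xenstore
keys. A sketch of both halves using the Linux xenbus helpers, with
transactions and error handling trimmed; "dev" is each driver's own
struct xenbus_device:

#include <xen/xenbus.h>

/* Backend: advertise persistent-grant support in its own directory. */
static int blkback_advertise_persistent(struct xenbus_device *dev)
{
    return xenbus_printf(XBT_NIL, dev->nodename,
                         "feature-persistent", "%u", 1);
}

/* Frontend: only reuse grants if the backend advertised the feature. */
static int blkfront_backend_persistent(struct xenbus_device *dev)
{
    unsigned int persistent = 0;

    if (xenbus_scanf(XBT_NIL, dev->otherend,
                     "feature-persistent", "%u", &persistent) != 1)
        persistent = 0;

    return persistent;
}
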
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/io/vscsiif.h
--- a/include/xen/interface/io/vscsiif.h        Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/io/vscsiif.h        Tue Dec 18 10:42:20 2012 +0100
@@ -30,29 +30,33 @@
 #include "ring.h"
 #include "../grant_table.h"
 
-/* command between backend and frontend */
+/* commands between backend and frontend */
 #define VSCSIIF_ACT_SCSI_CDB         1    /* SCSI CDB command */
 #define VSCSIIF_ACT_SCSI_ABORT       2    /* SCSI Device(Lun) Abort*/
 #define VSCSIIF_ACT_SCSI_RESET       3    /* SCSI Device(Lun) Reset*/
-
-
-#define VSCSIIF_BACK_MAX_PENDING_REQS    128
+#define VSCSIIF_ACT_SCSI_SG_PRESET   4    /* Preset SG elements */
 
 /*
  * Maximum scatter/gather segments per request.
  *
- * Considering balance between allocating al least 16 "vscsiif_request"
- * structures on one page (4096bytes) and number of scatter gather 
- * needed, we decided to use 26 as a magic number.
+ * Considering balance between allocating at least 16 "vscsiif_request"
+ * structures on one page (4096 bytes) and the number of scatter/gather
+ * elements needed, we decided to use 26 as a magic number.
  */
 #define VSCSIIF_SG_TABLESIZE             26
 
 /*
- * base on linux kernel 2.6.18
+ * based on Linux kernel 2.6.18
  */
 #define VSCSIIF_MAX_COMMAND_SIZE         16
 #define VSCSIIF_SENSE_BUFFERSIZE         96
 
+struct scsiif_request_segment {
+    grant_ref_t gref;
+    uint16_t offset;
+    uint16_t length;
+};
+typedef struct scsiif_request_segment vscsiif_segment_t;
 
 struct vscsiif_request {
     uint16_t rqid;          /* private guest value, echoed in resp  */
@@ -69,18 +73,26 @@ struct vscsiif_request {
                                          DMA_NONE(3) requests  */
     uint8_t nr_segments;              /* Number of pieces of scatter-gather */
 
-    struct scsiif_request_segment {
-        grant_ref_t gref;
-        uint16_t offset;
-        uint16_t length;
-    } seg[VSCSIIF_SG_TABLESIZE];
+    vscsiif_segment_t seg[VSCSIIF_SG_TABLESIZE];
     uint32_t reserved[3];
 };
 typedef struct vscsiif_request vscsiif_request_t;
 
+#define VSCSIIF_SG_LIST_SIZE ((sizeof(vscsiif_request_t) - 4) \
+                              / sizeof(vscsiif_segment_t))
+
+struct vscsiif_sg_list {
+    /* First two fields must match struct vscsiif_request! */
+    uint16_t rqid;          /* private guest value, must match main req */
+    uint8_t act;            /* VSCSIIF_ACT_SCSI_SG_PRESET */
+    uint8_t nr_segments;    /* Number of pieces of scatter-gather */
+    vscsiif_segment_t seg[VSCSIIF_SG_LIST_SIZE];
+};
+typedef struct vscsiif_sg_list vscsiif_sg_list_t;
+
 struct vscsiif_response {
     uint16_t rqid;
-    uint8_t padding;
+    uint8_t act;               /* valid only when backend supports SG_PRESET */
     uint8_t sense_len;
     uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
     int32_t rslt;
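
The new VSCSIIF_ACT_SCSI_SG_PRESET request lets a frontend hand extra
scatter/gather segments to the backend in a separate ring slot whose rqid
matches the main request. A sketch of filling such a slot; ring insertion and
grant allocation are left out, and the helper name is illustrative:

static void fill_sg_preset(struct vscsiif_sg_list *sgl, uint16_t rqid,
                           const vscsiif_segment_t *segs, unsigned int nr)
{
    unsigned int i;

    if (nr > VSCSIIF_SG_LIST_SIZE)
        nr = VSCSIIF_SG_LIST_SIZE;      /* caller must split further */

    sgl->rqid        = rqid;            /* must match the main request */
    sgl->act         = VSCSIIF_ACT_SCSI_SG_PRESET;
    sgl->nr_segments = (uint8_t)nr;

    for (i = 0; i < nr; i++)
        sgl->seg[i] = segs[i];
}
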
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/memory.h
--- a/include/xen/interface/memory.h    Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/memory.h    Tue Dec 18 10:42:20 2012 +0100
@@ -198,6 +198,15 @@ struct xen_machphys_mapping {
 typedef struct xen_machphys_mapping xen_machphys_mapping_t;
 DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
 
+/* Source mapping space. */
+/* ` enum phys_map_space { */
+#define XENMAPSPACE_shared_info  0 /* shared info page */
+#define XENMAPSPACE_grant_table  1 /* grant table page */
+#define XENMAPSPACE_gmfn         2 /* GMFN */
+#define XENMAPSPACE_gmfn_range   3 /* GMFN range */
+#define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom */
+/* ` } */
+
 /*
  * Sets the GPFN at which a particular page appears in the specified guest's
  * pseudophysical address space.
@@ -211,24 +220,39 @@ struct xen_add_to_physmap {
     /* Number of pages to go through for gmfn_range */
     uint16_t    size;
 
-    /* Source mapping space. */
-#define XENMAPSPACE_shared_info 0 /* shared info page */
-#define XENMAPSPACE_grant_table 1 /* grant table page */
-#define XENMAPSPACE_gmfn        2 /* GMFN */
-#define XENMAPSPACE_gmfn_range  3 /* GMFN range */
-    unsigned int space;
+    unsigned int space; /* => enum phys_map_space */
 
 #define XENMAPIDX_grant_table_status 0x80000000
 
-    /* Index into source mapping space. */
+    /* Index into space being mapped. */
     xen_ulong_t idx;
 
-    /* GPFN where the source mapping page should appear. */
+    /* GPFN in domid where the source mapping page should appear. */
     xen_pfn_t     gpfn;
 };
 typedef struct xen_add_to_physmap xen_add_to_physmap_t;
 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
 
+/* A batched version of add_to_physmap. */
+#define XENMEM_add_to_physmap_range 23
+struct xen_add_to_physmap_range {
+    /* Which domain to change the mapping for. */
+    domid_t domid;
+    uint16_t space; /* => enum phys_map_space */
+
+    /* Number of pages to go through */
+    uint16_t size;
+    domid_t foreign_domid; /* IFF gmfn_foreign */
+
+    /* Indexes into space being mapped. */
+    XEN_GUEST_HANDLE(xen_ulong_t) idxs;
+
+    /* GPFN in domid where the source mapping page should appear. */
+    XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
+};
+typedef struct xen_add_to_physmap_range xen_add_to_physmap_range_t;
+DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
+
 /*
  * Unmaps the page appearing at a particular GPFN from the specified guest's
  * pseudophysical address space.
@@ -397,6 +421,12 @@ struct xen_mem_sharing_op {
 typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
 
+/*
+ * Reserve ops for future/out-of-tree "claim" patches (Oracle)
+ */
+#define XENMEM_claim_pages                  24
+#define XENMEM_get_unclaimed_pages          25
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
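
XENMEM_add_to_physmap_range batches what xen_add_to_physmap does one page at
a time, and carries the foreign_domid needed for XENMAPSPACE_gmfn_foreign. A
sketch of the gmfn_foreign case; the idxs[]/gpfns[] arrays and the helper
name are the caller's, and error handling is elided:

static int map_foreign_gmfns(domid_t foreign_domid,
                             xen_ulong_t *idxs, xen_pfn_t *gpfns,
                             uint16_t count)
{
    struct xen_add_to_physmap_range xatpr = {
        .domid         = DOMID_SELF,              /* map into ourselves  */
        .space         = XENMAPSPACE_gmfn_foreign,
        .size          = count,
        .foreign_domid = foreign_domid,           /* whose GMFNs to map  */
    };

    set_xen_guest_handle(xatpr.idxs,  idxs);      /* source GMFNs        */
    set_xen_guest_handle(xatpr.gpfns, gpfns);     /* where they appear   */

    return HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatpr);
}
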
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/sched.h
--- a/include/xen/interface/sched.h     Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/sched.h     Tue Dec 18 10:42:20 2012 +0100
@@ -1,8 +1,8 @@
 /******************************************************************************
  * sched.h
- * 
+ *
  * Scheduler state interactions
- * 
+ *
  * Permission is hereby granted, free of charge, to any person obtaining a copy
  * of this software and associated documentation files (the "Software"), to
  * deal in the Software without restriction, including without limitation the
@@ -30,20 +30,33 @@
 #include "event_channel.h"
 
 /*
+ * `incontents 150 sched Guest Scheduler Operations
+ *
+ * The SCHEDOP interface provides mechanisms for a guest to interact
+ * with the scheduler, including yield, blocking and shutting itself
+ * down.
+ */
+
+/*
  * The prototype for this hypercall is:
- *  long sched_op(int cmd, void *arg)
+ * ` long HYPERVISOR_sched_op(enum sched_op cmd, void *arg, ...)
+ *
  * @cmd == SCHEDOP_??? (scheduler operation).
  * @arg == Operation-specific extra argument(s), as described below.
- * 
+ * ...  == Additional Operation-specific extra arguments, described below.
+ *
  * Versions of Xen prior to 3.0.2 provided only the following legacy version
  * of this hypercall, supporting only the commands yield, block and shutdown:
  *  long sched_op(int cmd, unsigned long arg)
  * @cmd == SCHEDOP_??? (scheduler operation).
  * @arg == 0               (SCHEDOP_yield and SCHEDOP_block)
  *      == SHUTDOWN_* code (SCHEDOP_shutdown)
- * This legacy version is available to new guests as sched_op_compat().
+ *
+ * This legacy version is available to new guests as:
+ * ` long HYPERVISOR_sched_op_compat(enum sched_op cmd, unsigned long arg)
  */
 
+/* ` enum sched_op { // SCHEDOP_* => struct sched_* */
 /*
  * Voluntarily yield the CPU.
  * @arg == NULL.
@@ -61,21 +74,57 @@
 
 /*
  * Halt execution of this domain (all VCPUs) and notify the system controller.
- * @arg == pointer to sched_shutdown structure.
+ * @arg == pointer to sched_shutdown_t structure.
+ *
+ * If the sched_shutdown_t reason is SHUTDOWN_suspend then this
+ * hypercall takes an additional argument, which should be the
+ * MFN of the guest's start_info_t.
+ *
+ * In addition, when the reason is SHUTDOWN_suspend, this hypercall
+ * returns 1 if suspend was cancelled or the domain was merely
+ * checkpointed, and 0 if it is resuming in a new domain.
  */
 #define SCHEDOP_shutdown    2
+
+/*
+ * Poll a set of event-channel ports. Return when one or more are pending. An
+ * optional timeout may be specified.
+ * @arg == pointer to sched_poll_t structure.
+ */
+#define SCHEDOP_poll        3
+
+/*
+ * Declare a shutdown for another domain. The main use of this function is
+ * in interpreting shutdown requests and reasons for fully-virtualized
+ * domains.  A para-virtualized domain may use SCHEDOP_shutdown directly.
+ * @arg == pointer to sched_remote_shutdown_t structure.
+ */
+#define SCHEDOP_remote_shutdown        4
+
+/*
+ * Latch a shutdown code, so that when the domain later shuts down it
+ * reports this code to the control tools.
+ * @arg == sched_shutdown_t, as for SCHEDOP_shutdown.
+ */
+#define SCHEDOP_shutdown_code 5
+
+/*
+ * Setup, poke and destroy a domain watchdog timer.
+ * @arg == pointer to sched_watchdog_t structure.
+ * With id == 0, setup a domain watchdog timer to cause domain shutdown
+ *               after timeout, returns watchdog id.
+ * With id != 0 and timeout == 0, destroy domain watchdog timer.
+ * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
+ */
+#define SCHEDOP_watchdog    6
+/* ` } */
+
 struct sched_shutdown {
-    unsigned int reason; /* SHUTDOWN_* */
+    unsigned int reason; /* SHUTDOWN_* => enum sched_shutdown_reason */
 };
 typedef struct sched_shutdown sched_shutdown_t;
 DEFINE_XEN_GUEST_HANDLE(sched_shutdown_t);
 
-/*
- * Poll a set of event-channel ports. Return when one or more are pending. An
- * optional timeout may be specified.
- * @arg == pointer to sched_poll structure.
- */
-#define SCHEDOP_poll        3
 struct sched_poll {
     XEN_GUEST_HANDLE(evtchn_port_t) ports;
     unsigned int nr_ports;
@@ -84,36 +133,13 @@ struct sched_poll {
 typedef struct sched_poll sched_poll_t;
 DEFINE_XEN_GUEST_HANDLE(sched_poll_t);
 
-/*
- * Declare a shutdown for another domain. The main use of this function is
- * in interpreting shutdown requests and reasons for fully-virtualized
- * domains.  A para-virtualized domain may use SCHEDOP_shutdown directly.
- * @arg == pointer to sched_remote_shutdown structure.
- */
-#define SCHEDOP_remote_shutdown        4
 struct sched_remote_shutdown {
     domid_t domain_id;         /* Remote domain ID */
-    unsigned int reason;       /* SHUTDOWN_xxx reason */
+    unsigned int reason;       /* SHUTDOWN_* => enum sched_shutdown_reason */
 };
 typedef struct sched_remote_shutdown sched_remote_shutdown_t;
 DEFINE_XEN_GUEST_HANDLE(sched_remote_shutdown_t);
 
-/*
- * Latch a shutdown code, so that when the domain later shuts down it
- * reports this code to the control tools.
- * @arg == as for SCHEDOP_shutdown.
- */
-#define SCHEDOP_shutdown_code 5
-
-/*
- * Setup, poke and destroy a domain watchdog timer.
- * @arg == pointer to sched_watchdog structure.
- * With id == 0, setup a domain watchdog timer to cause domain shutdown
- *               after timeout, returns watchdog id.
- * With id != 0 and timeout == 0, destroy domain watchdog timer.
- * With id != 0 and timeout != 0, poke watchdog timer and set new timeout.
- */
-#define SCHEDOP_watchdog    6
 struct sched_watchdog {
     uint32_t id;                /* watchdog ID */
     uint32_t timeout;           /* timeout */
@@ -126,11 +152,13 @@ DEFINE_XEN_GUEST_HANDLE(sched_watchdog_t
  * software to determine the appropriate action. For the most part, Xen does
  * not care about the shutdown code.
  */
+/* ` enum sched_shutdown_reason { */
 #define SHUTDOWN_poweroff   0  /* Domain exited normally. Clean up and kill. */
 #define SHUTDOWN_reboot     1  /* Clean up, kill, and then restart.          */
 #define SHUTDOWN_suspend    2  /* Clean up, save suspend info, kill.         */
 #define SHUTDOWN_crash      3  /* Tell controller we've crashed.             */
 #define SHUTDOWN_watchdog   4  /* Restart because watchdog time expired.     */
+/* ` } */
 
 #endif /* __XEN_PUBLIC_SCHED_H__ */
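
The watchdog rules spelled out for SCHEDOP_watchdog above translate into
three small operations. A sketch, assuming the usual HYPERVISOR_sched_op()
wrapper and a timeout expressed in seconds:

/* id == 0: create a watchdog; returns its id (or a negative errno). */
static int watchdog_setup(uint32_t timeout)
{
    struct sched_watchdog wd = { .id = 0, .timeout = timeout };
    return HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
}

/* id != 0, timeout != 0: re-arm an existing watchdog. */
static void watchdog_poke(uint32_t id, uint32_t timeout)
{
    struct sched_watchdog wd = { .id = id, .timeout = timeout };
    HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
}

/* id != 0, timeout == 0: destroy the watchdog. */
static void watchdog_destroy(uint32_t id)
{
    struct sched_watchdog wd = { .id = id, .timeout = 0 };
    HYPERVISOR_sched_op(SCHEDOP_watchdog, &wd);
}
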
 
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/version.h
--- a/include/xen/interface/version.h   Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/version.h   Tue Dec 18 10:42:20 2012 +0100
@@ -28,6 +28,8 @@
 #ifndef __XEN_PUBLIC_VERSION_H__
 #define __XEN_PUBLIC_VERSION_H__
 
+#include "xen.h"
+
 /* NB. All ops return zero on success, except XENVER_{version,pagesize} */
 
 /* arg == NULL; returns major:minor (16:16). */
@@ -58,7 +60,7 @@ typedef char xen_changeset_info_t[64];
 
 #define XENVER_platform_parameters 5
 struct xen_platform_parameters {
-    unsigned long virt_start;
+    xen_ulong_t virt_start;
 };
 typedef struct xen_platform_parameters xen_platform_parameters_t;
 
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/xen-compat.h
--- a/include/xen/interface/xen-compat.h        Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/xen-compat.h        Tue Dec 18 10:42:20 2012 +0100
@@ -27,7 +27,7 @@
 #ifndef __XEN_PUBLIC_XEN_COMPAT_H__
 #define __XEN_PUBLIC_XEN_COMPAT_H__
 
-#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040200
+#define __XEN_LATEST_INTERFACE_VERSION__ 0x00040300
 
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 /* Xen is built with matching headers and implements the latest interface. */
diff -r 864f8414c071 -r 302f7c9423c9 include/xen/interface/xen.h
--- a/include/xen/interface/xen.h       Tue Dec 18 10:15:28 2012 +0100
+++ b/include/xen/interface/xen.h       Tue Dec 18 10:42:20 2012 +0100
@@ -45,12 +45,15 @@ DEFINE_XEN_GUEST_HANDLE(char);
 __DEFINE_XEN_GUEST_HANDLE(uchar, unsigned char);
 DEFINE_XEN_GUEST_HANDLE(int);
 __DEFINE_XEN_GUEST_HANDLE(uint,  unsigned int);
+#if __XEN_INTERFACE_VERSION__ < 0x00040300
 DEFINE_XEN_GUEST_HANDLE(long);
 __DEFINE_XEN_GUEST_HANDLE(ulong, unsigned long);
+#endif
 DEFINE_XEN_GUEST_HANDLE(void);
 
 DEFINE_XEN_GUEST_HANDLE(uint64_t);
 DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
+DEFINE_XEN_GUEST_HANDLE(xen_ulong_t);
 #endif
 
 /*
@@ -318,48 +321,54 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 
 /*
  * MMU EXTENDED OPERATIONS
- * 
- * HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
+ *
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_mmuext_op(mmuext_op_t uops[],
+ * `                      unsigned int count,
+ * `                      unsigned int *pdone,
+ * `                      unsigned int foreigndom)
+ */
+/* HYPERVISOR_mmuext_op() accepts a list of mmuext_op structures.
  * A foreigndom (FD) can be specified (or DOMID_SELF for none).
  * Where the FD has some effect, it is described below.
- * 
+ *
  * cmd: MMUEXT_(UN)PIN_*_TABLE
  * mfn: Machine frame number to be (un)pinned as a p.t. page.
  *      The frame must belong to the FD, if one is specified.
- * 
+ *
  * cmd: MMUEXT_NEW_BASEPTR
  * mfn: Machine frame number of new page-table base to install in MMU.
- * 
+ *
  * cmd: MMUEXT_NEW_USER_BASEPTR [x86/64 only]
  * mfn: Machine frame number of new page-table base to install in MMU
  *      when in user space.
- * 
+ *
  * cmd: MMUEXT_TLB_FLUSH_LOCAL
  * No additional arguments. Flushes local TLB.
- * 
+ *
  * cmd: MMUEXT_INVLPG_LOCAL
  * linear_addr: Linear address to be flushed from the local TLB.
- * 
+ *
  * cmd: MMUEXT_TLB_FLUSH_MULTI
  * vcpumask: Pointer to bitmap of VCPUs to be flushed.
- * 
+ *
  * cmd: MMUEXT_INVLPG_MULTI
  * linear_addr: Linear address to be flushed.
  * vcpumask: Pointer to bitmap of VCPUs to be flushed.
- * 
+ *
  * cmd: MMUEXT_TLB_FLUSH_ALL
  * No additional arguments. Flushes all VCPUs' TLBs.
- * 
+ *
  * cmd: MMUEXT_INVLPG_ALL
  * linear_addr: Linear address to be flushed from all VCPUs' TLBs.
- * 
+ *
  * cmd: MMUEXT_FLUSH_CACHE
  * No additional arguments. Writes back and flushes cache contents.
  *
  * cmd: MMUEXT_FLUSH_CACHE_GLOBAL
  * No additional arguments. Writes back and flushes cache contents
  * on all CPUs in the system.
- * 
+ *
  * cmd: MMUEXT_SET_LDT
  * linear_addr: Linear address of LDT base (NB. must be page-aligned).
  * nr_ents: Number of entries in LDT.
@@ -374,6 +383,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
  * cmd: MMUEXT_[UN]MARK_SUPER
  * mfn: Machine frame number of head of superpage to be [un]marked.
  */
+/* ` enum mmuext_cmd { */
 #define MMUEXT_PIN_L1_TABLE      0
 #define MMUEXT_PIN_L2_TABLE      1
 #define MMUEXT_PIN_L3_TABLE      2
@@ -394,10 +404,11 @@ DEFINE_XEN_GUEST_HANDLE(xen_pfn_t);
 #define MMUEXT_FLUSH_CACHE_GLOBAL 18
 #define MMUEXT_MARK_SUPER       19
 #define MMUEXT_UNMARK_SUPER     20
+/* ` } */
 
 #ifndef __ASSEMBLY__
 struct mmuext_op {
-    unsigned int cmd;
+    unsigned int cmd; /* => enum mmuext_cmd */
     union {
         /* [UN]PIN_TABLE, NEW_BASEPTR, NEW_USER_BASEPTR
          * CLEAR_PAGE, COPY_PAGE, [UN]MARK_SUPER */
@@ -422,9 +433,24 @@ typedef struct mmuext_op mmuext_op_t;
 DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
 #endif
 
+/*
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping(unsigned long va, u64 val,
+ * `                              enum uvm_flags flags)
+ * `
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_update_va_mapping_otherdomain(unsigned long va, u64 val,
+ * `                                          enum uvm_flags flags,
+ * `                                          domid_t domid)
+ * `
+ * ` @va: The virtual address whose mapping we want to change
+ * ` @val: The new page table entry, must contain a machine address
+ * ` @flags: Control TLB flushes
+ */
 /* These are passed as 'flags' to update_va_mapping. They can be ORed. */
 /* When specifying UVMF_MULTI, also OR in a pointer to a CPU bitmap.   */
 /* UVMF_LOCAL is merely UVMF_MULTI with a NULL bitmap pointer.         */
+/* ` enum uvm_flags { */
 #define UVMF_NONE               (0UL<<0) /* No flushing at all.   */
 #define UVMF_TLB_FLUSH          (1UL<<0) /* Flush entire TLB(s).  */
 #define UVMF_INVLPG             (2UL<<0) /* Flush only one entry. */
@@ -432,6 +458,7 @@ DEFINE_XEN_GUEST_HANDLE(mmuext_op_t);
 #define UVMF_MULTI              (0UL<<2) /* Flush subset of TLBs. */
 #define UVMF_LOCAL              (0UL<<2) /* Flush local TLB.      */
 #define UVMF_ALL                (1UL<<2) /* Flush all TLBs.       */
+/* ` } */
 
 /*
  * Commands to HYPERVISOR_console_io().
@@ -514,7 +541,10 @@ typedef struct mmu_update mmu_update_t;
 DEFINE_XEN_GUEST_HANDLE(mmu_update_t);
 
 /*
- * Send an array of these to HYPERVISOR_multicall().
+ * ` enum neg_errnoval
+ * ` HYPERVISOR_multicall(multicall_entry_t call_list[],
+ * `                      unsigned int nr_calls);
+ *
  * NB. The fields are natural register size for this architecture.
  */
 struct multicall_entry {
@@ -654,7 +684,8 @@ typedef struct shared_info shared_info_t
 #endif
 
 /*
- * Start-of-day memory layout:
+ * `incontents 200 startofday Start-of-day memory layout
+ *
  *  1. The domain is started within contiguous virtual-memory region.
  *  2. The contiguous region ends on an aligned 4MB boundary.
  *  3. This the order of bootstrap elements in the initial virtual region:

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 

