
[Xen-changelog] [xen master] dm_op: convert HVMOP_*ioreq_server*



commit a2323df5f47c630eb763cac82f6680260ead24c4
Author:     Paul Durrant <paul.durrant@xxxxxxxxxx>
AuthorDate: Wed Jan 25 10:41:35 2017 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Jan 25 10:41:35 2017 +0100

    dm_op: convert HVMOP_*ioreq_server*
    
    The definitions of HVM_IOREQSRV_BUFIOREQ_* have to persist, as they
    are already in use by callers of the libxc interface.
    
    Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
    Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
    Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
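For illustration only (not part of the patch): the conversion leaves the
libxc wrapper signatures intact, so existing callers keep passing the
HVM_IOREQSRV_BUFIOREQ_* values unchanged. A minimal caller sketch,
assuming a valid xc handle and target domid:

    #include <xenctrl.h>

    /* Hypothetical caller: the wrapper keeps its signature across the
     * conversion, so the HVM_IOREQSRV_BUFIOREQ_* values still apply. */
    static int create_server(xc_interface *xch, domid_t domid,
                             ioservid_t *id)
    {
        /* Request an atomically-updated buffered ioreq ring. */
        return xc_hvm_create_ioreq_server(xch, domid,
                                          HVM_IOREQSRV_BUFIOREQ_ATOMIC,
                                          id);
    }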
 tools/libxc/xc_domain.c          | 212 ++++++++++++++++---------------------
 xen/arch/x86/hvm/dm.c            |  99 ++++++++++++++++++
 xen/arch/x86/hvm/hvm.c           | 219 ---------------------------------------
 xen/arch/x86/hvm/ioreq.c         |  36 +++----
 xen/include/asm-x86/hvm/domain.h |   3 +-
 xen/include/public/hvm/dm_op.h   | 158 ++++++++++++++++++++++++++++
 xen/include/public/hvm/hvm_op.h  | 132 +----------------------
 xen/include/xlat.lst             |   5 +
 xen/include/xsm/dummy.h          |   6 --
 xen/include/xsm/xsm.h            |   6 --
 xen/xsm/dummy.c                  |   1 -
 xen/xsm/flask/hooks.c            |   6 --
 12 files changed, 376 insertions(+), 507 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 296b852..419a897 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1417,24 +1417,24 @@ int xc_hvm_create_ioreq_server(xc_interface *xch,
                                int handle_bufioreq,
                                ioservid_t *id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_create_ioreq_server_t, arg);
+    struct xen_dm_op op;
+    struct xen_dm_op_create_ioreq_server *data;
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->handle_bufioreq = handle_bufioreq;
+    op.op = XEN_DMOP_create_ioreq_server;
+    data = &op.u.create_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_create_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->handle_bufioreq = handle_bufioreq;
+
+    rc = do_dm_op(xch, domid, 1, &op, sizeof(op));
+    if ( rc )
+        return rc;
 
-    *id = arg->id;
+    *id = data->id;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_get_ioreq_server_info(xc_interface *xch,
@@ -1444,84 +1444,71 @@ int xc_hvm_get_ioreq_server_info(xc_interface *xch,
                                  xen_pfn_t *bufioreq_pfn,
                                  evtchn_port_t *bufioreq_port)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_get_ioreq_server_info_t, arg);
+    struct xen_dm_op op;
+    struct xen_dm_op_get_ioreq_server_info *data;
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
+    op.op = XEN_DMOP_get_ioreq_server_info;
+    data = &op.u.get_ioreq_server_info;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_get_ioreq_server_info,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-    if ( rc != 0 )
-        goto done;
+    data->id = id;
+
+    rc = do_dm_op(xch, domid, 1, &op, sizeof(op));
+    if ( rc )
+        return rc;
 
     if ( ioreq_pfn )
-        *ioreq_pfn = arg->ioreq_pfn;
+        *ioreq_pfn = data->ioreq_pfn;
 
     if ( bufioreq_pfn )
-        *bufioreq_pfn = arg->bufioreq_pfn;
+        *bufioreq_pfn = data->bufioreq_pfn;
 
     if ( bufioreq_port )
-        *bufioreq_port = arg->bufioreq_port;
+        *bufioreq_port = data->bufioreq_port;
 
-done:
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return 0;
 }
 
 int xc_hvm_map_io_range_to_ioreq_server(xc_interface *xch, domid_t domid,
                                         ioservid_t id, int is_mmio,
                                         uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
+    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
+    data = &op.u.map_io_range_to_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
+    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
+    data->start = start;
+    data->end = end;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_unmap_io_range_from_ioreq_server(xc_interface *xch, domid_t domid,
                                             ioservid_t id, int is_mmio,
                                             uint64_t start, uint64_t end)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = is_mmio ? HVMOP_IO_RANGE_MEMORY : HVMOP_IO_RANGE_PORT;
-    arg->start = start;
-    arg->end = end;
+    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
+    data = &op.u.unmap_io_range_from_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
+    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
+    data->start = start;
+    data->end = end;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1529,37 +1516,32 @@ int xc_hvm_map_pcidev_to_ioreq_server(xc_interface *xch, domid_t domid,
                                       uint8_t bus, uint8_t device,
                                       uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
+    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
+    data = &op.u.map_io_range_to_ioreq_server;
+
+    data->id = id;
+    data->type = XEN_DMOP_IO_RANGE_PCI;
 
     /*
      * The underlying hypercall will deal with ranges of PCI SBDF
      * but, for simplicity, the API only uses singletons.
      */
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_map_io_range_to_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
+                                                (uint64_t)bus,
+                                                (uint64_t)device,
+                                                (uint64_t)function);
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
@@ -1567,54 +1549,49 @@ int xc_hvm_unmap_pcidev_from_ioreq_server(xc_interface *xch, domid_t domid,
                                           uint8_t bus, uint8_t device,
                                           uint8_t function)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_io_range_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_ioreq_server_range *data;
 
     if (device > 0x1f || function > 0x7) {
         errno = EINVAL;
         return -1;
     }
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->type = HVMOP_IO_RANGE_PCI;
-    arg->start = arg->end = HVMOP_PCI_SBDF((uint64_t)segment,
-                                           (uint64_t)bus,
-                                           (uint64_t)device,
-                                           (uint64_t)function);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_unmap_io_range_from_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
+    data = &op.u.unmap_io_range_from_ioreq_server;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    data->id = id;
+    data->type = XEN_DMOP_IO_RANGE_PCI;
+
+    /*
+     * The underlying hypercall will deal with ranges of PCI SBDF
+     * but, for simplicity, the API only uses singletons.
+     */
+    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
+                                                (uint64_t)bus,
+                                                (uint64_t)device,
+                                                (uint64_t)function);
+
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_destroy_ioreq_server(xc_interface *xch,
                                 domid_t domid,
                                 ioservid_t id)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_destroy_ioreq_server_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_destroy_ioreq_server *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
+    op.op = XEN_DMOP_destroy_ioreq_server;
+    data = &op.u.destroy_ioreq_server;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_destroy_ioreq_server,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_hvm_set_ioreq_server_state(xc_interface *xch,
@@ -1622,23 +1599,18 @@ int xc_hvm_set_ioreq_server_state(xc_interface *xch,
                                   ioservid_t id,
                                   int enabled)
 {
-    DECLARE_HYPERCALL_BUFFER(xen_hvm_set_ioreq_server_state_t, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_set_ioreq_server_state *data;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-        return -1;
+    memset(&op, 0, sizeof(op));
 
-    arg->domid = domid;
-    arg->id = id;
-    arg->enabled = !!enabled;
+    op.op = XEN_DMOP_set_ioreq_server_state;
+    data = &op.u.set_ioreq_server_state;
 
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_ioreq_server_state,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->id = id;
+    data->enabled = !!enabled;
 
-    xc_hypercall_buffer_free(xch, arg);
-    return rc;
+    return do_dm_op(xch, domid, 1, &op, sizeof(op));
 }
 
 int xc_domain_setdebugging(xc_interface *xch,
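All of the wrappers above now share one marshalling pattern: a zeroed
struct xen_dm_op on the stack (no hypercall bounce buffer), the sub-op
selected via op.op, the payload filled through the matching union
member, and one do_dm_op() call. A distilled sketch of the pattern,
using the destroy sub-op (illustrative, not additional patch content):

    struct xen_dm_op op;

    memset(&op, 0, sizeof(op));            /* zero padding and union  */
    op.op = XEN_DMOP_destroy_ioreq_server; /* select the sub-op       */
    op.u.destroy_ioreq_server.id = id;     /* fill its payload        */

    /* domid now travels as a hypercall argument, not a struct field. */
    return do_dm_op(xch, domid, 1, &op, sizeof(op));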
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index d212a68..f92a0be 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -78,8 +78,101 @@ static int dm_op(domid_t domid,
         goto out;
     }
 
+    rc = -EINVAL;
+    if ( op.pad )
+        goto out;
+
     switch ( op.op )
     {
+    case XEN_DMOP_create_ioreq_server:
+    {
+        struct domain *curr_d = current->domain;
+        struct xen_dm_op_create_ioreq_server *data =
+            &op.u.create_ioreq_server;
+
+        const_op = false;
+
+        rc = -EINVAL;
+        if ( data->pad[0] || data->pad[1] || data->pad[2] )
+            break;
+
+        rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
+                                     data->handle_bufioreq, &data->id);
+        break;
+    }
+
+    case XEN_DMOP_get_ioreq_server_info:
+    {
+        struct xen_dm_op_get_ioreq_server_info *data =
+            &op.u.get_ioreq_server_info;
+
+        const_op = false;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_get_ioreq_server_info(d, data->id,
+                                       &data->ioreq_pfn,
+                                       &data->bufioreq_pfn,
+                                       &data->bufioreq_port);
+        break;
+    }
+
+    case XEN_DMOP_map_io_range_to_ioreq_server:
+    {
+        const struct xen_dm_op_ioreq_server_range *data =
+            &op.u.map_io_range_to_ioreq_server;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_map_io_range_to_ioreq_server(d, data->id, data->type,
+                                              data->start, data->end);
+        break;
+    }
+
+    case XEN_DMOP_unmap_io_range_from_ioreq_server:
+    {
+        const struct xen_dm_op_ioreq_server_range *data =
+            &op.u.unmap_io_range_from_ioreq_server;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_unmap_io_range_from_ioreq_server(d, data->id, data->type,
+                                                  data->start, data->end);
+        break;
+    }
+
+    case XEN_DMOP_set_ioreq_server_state:
+    {
+        const struct xen_dm_op_set_ioreq_server_state *data =
+            &op.u.set_ioreq_server_state;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_set_ioreq_server_state(d, data->id, !!data->enabled);
+        break;
+    }
+
+    case XEN_DMOP_destroy_ioreq_server:
+    {
+        const struct xen_dm_op_destroy_ioreq_server *data =
+            &op.u.destroy_ioreq_server;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        rc = hvm_destroy_ioreq_server(d, data->id);
+        break;
+    }
+
     default:
         rc = -EOPNOTSUPP;
         break;
@@ -96,6 +189,12 @@ static int dm_op(domid_t domid,
     return rc;
 }
 
+CHECK_dm_op_create_ioreq_server;
+CHECK_dm_op_get_ioreq_server_info;
+CHECK_dm_op_ioreq_server_range;
+CHECK_dm_op_set_ioreq_server_state;
+CHECK_dm_op_destroy_ioreq_server;
+
 #define MAX_NR_BUFS 1
 
 int compat_dm_op(domid_t domid,
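Note that every handler added above rejects non-zero padding before
doing any work, keeping those bits free to gain a meaning in future ABI
revisions. The shape of that check, distilled from the hunks above
(illustrative only):

    rc = -EINVAL;
    if ( data->pad )       /* or each element of a pad[] array */
        break;             /* padding must be zero in this ABI */

    rc = hvm_destroy_ioreq_server(d, data->id);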
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 1b13cdd..0224f69 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4383,195 +4383,6 @@ static int hvmop_flush_tlb_all(void)
     return 0;
 }
 
-static int hvmop_create_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_create_ioreq_server_t) uop)
-{
-    struct domain *curr_d = current->domain;
-    xen_hvm_create_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_create_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_create_ioreq_server(d, curr_d->domain_id, 0,
-                                 op.handle_bufioreq, &op.id);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_get_ioreq_server_info(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_get_ioreq_server_info_t) uop)
-{
-    xen_hvm_get_ioreq_server_info_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_get_ioreq_server_info);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_get_ioreq_server_info(d, op.id,
-                                   &op.ioreq_pfn,
-                                   &op.bufioreq_pfn, 
-                                   &op.bufioreq_port);
-    if ( rc != 0 )
-        goto out;
-
-    rc = copy_to_guest(uop, &op, 1) ? -EFAULT : 0;
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_map_io_range_to_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_map_io_range_to_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_map_io_range_to_ioreq_server(d, op.id, op.type,
-                                          op.start, op.end);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_unmap_io_range_from_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_io_range_t) uop)
-{
-    xen_hvm_io_range_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_unmap_io_range_from_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_unmap_io_range_from_ioreq_server(d, op.id, op.type,
-                                              op.start, op.end);
-    
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_set_ioreq_server_state(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_ioreq_server_state_t) uop)
-{
-    xen_hvm_set_ioreq_server_state_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_set_ioreq_server_state);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_set_ioreq_server_state(d, op.id, !!op.enabled);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
-static int hvmop_destroy_ioreq_server(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_destroy_ioreq_server_t) uop)
-{
-    xen_hvm_destroy_ioreq_server_t op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_ioreq_server(XSM_DM_PRIV, d, HVMOP_destroy_ioreq_server);
-    if ( rc != 0 )
-        goto out;
-
-    rc = hvm_destroy_ioreq_server(d, op.id);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_set_evtchn_upcall_vector(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_evtchn_upcall_vector_t) uop)
 {
@@ -5288,36 +5099,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     start_iter = op & ~mask;
     switch ( op &= mask )
     {
-    case HVMOP_create_ioreq_server:
-        rc = hvmop_create_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_create_ioreq_server_t));
-        break;
-    
-    case HVMOP_get_ioreq_server_info:
-        rc = hvmop_get_ioreq_server_info(
-            guest_handle_cast(arg, xen_hvm_get_ioreq_server_info_t));
-        break;
-    
-    case HVMOP_map_io_range_to_ioreq_server:
-        rc = hvmop_map_io_range_to_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-    
-    case HVMOP_unmap_io_range_from_ioreq_server:
-        rc = hvmop_unmap_io_range_from_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_io_range_t));
-        break;
-
-    case HVMOP_set_ioreq_server_state:
-        rc = hvmop_set_ioreq_server_state(
-            guest_handle_cast(arg, xen_hvm_set_ioreq_server_state_t));
-        break;
-    
-    case HVMOP_destroy_ioreq_server:
-        rc = hvmop_destroy_ioreq_server(
-            guest_handle_cast(arg, xen_hvm_destroy_ioreq_server_t));
-        break;
-    
     case HVMOP_set_evtchn_upcall_vector:
         rc = hvmop_set_evtchn_upcall_vector(
             guest_handle_cast(arg, xen_hvm_evtchn_upcall_vector_t));
diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
index 26a0cb8..899ba64 100644
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -514,9 +514,9 @@ static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
         char *name;
 
         rc = asprintf(&name, "ioreq_server %d %s", s->id,
-                      (i == HVMOP_IO_RANGE_PORT) ? "port" :
-                      (i == HVMOP_IO_RANGE_MEMORY) ? "memory" :
-                      (i == HVMOP_IO_RANGE_PCI) ? "pci" :
+                      (i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
+                      (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
+                      (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
                       "");
         if ( rc )
             goto fail;
@@ -834,9 +834,9 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
 
             switch ( type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
+            case XEN_DMOP_IO_RANGE_PORT:
+            case XEN_DMOP_IO_RANGE_MEMORY:
+            case XEN_DMOP_IO_RANGE_PCI:
                 r = s->range[type];
                 break;
 
@@ -886,9 +886,9 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
 
             switch ( type )
             {
-            case HVMOP_IO_RANGE_PORT:
-            case HVMOP_IO_RANGE_MEMORY:
-            case HVMOP_IO_RANGE_PCI:
+            case XEN_DMOP_IO_RANGE_PORT:
+            case XEN_DMOP_IO_RANGE_MEMORY:
+            case XEN_DMOP_IO_RANGE_PCI:
                 r = s->range[type];
                 break;
 
@@ -1129,12 +1129,12 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
 
         /* PCI config data cycle */
 
-        sbdf = HVMOP_PCI_SBDF(0,
-                              PCI_BUS(CF8_BDF(cf8)),
-                              PCI_SLOT(CF8_BDF(cf8)),
-                              PCI_FUNC(CF8_BDF(cf8)));
+        sbdf = XEN_DMOP_PCI_SBDF(0,
+                                 PCI_BUS(CF8_BDF(cf8)),
+                                 PCI_SLOT(CF8_BDF(cf8)),
+                                 PCI_FUNC(CF8_BDF(cf8)));
 
-        type = HVMOP_IO_RANGE_PCI;
+        type = XEN_DMOP_IO_RANGE_PCI;
         addr = ((uint64_t)sbdf << 32) |
                CF8_ADDR_LO(cf8) |
                (p->addr & 3);
@@ -1155,7 +1155,7 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
     else
     {
         type = (p->type == IOREQ_TYPE_PIO) ?
-                HVMOP_IO_RANGE_PORT : HVMOP_IO_RANGE_MEMORY;
+                XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
         addr = p->addr;
     }
 
@@ -1177,19 +1177,19 @@ struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
         {
             unsigned long end;
 
-        case HVMOP_IO_RANGE_PORT:
+        case XEN_DMOP_IO_RANGE_PORT:
             end = addr + p->size - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_MEMORY:
+        case XEN_DMOP_IO_RANGE_MEMORY:
             end = addr + (p->size * p->count) - 1;
             if ( rangeset_contains_range(r, addr, end) )
                 return s;
 
             break;
-        case HVMOP_IO_RANGE_PCI:
+        case XEN_DMOP_IO_RANGE_PCI:
             if ( rangeset_contains_singleton(r, addr >> 32) )
             {
                 p->type = IOREQ_TYPE_PCI_CONFIG;
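The XEN_DMOP_PCI_SBDF key used above packs segment, bus, device and
function into one 32-bit value ([31:16] segment, [15:8] bus, [7:3]
device, [2:0] function), which is why a singleton lookup on addr >> 32
suffices for PCI config cycles. A standalone restatement of the
encoding, for illustration:

    #include <assert.h>
    #include <stdint.h>

    /* Same bit layout as XEN_DMOP_PCI_SBDF in dm_op.h. */
    static uint32_t sbdf(uint32_t s, uint32_t b, uint32_t d, uint32_t f)
    {
        return ((s & 0xffff) << 16) | ((b & 0xff) << 8) |
               ((d & 0x1f) << 3) | (f & 0x07);
    }

    int main(void)
    {
        assert(sbdf(0, 3, 0x1f, 7) == 0x03ff); /* 0000:03:1f.7 */
        return 0;
    }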
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 62e874a..420cbdc 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -33,6 +33,7 @@
 #include <public/hvm/params.h>
 #include <public/hvm/save.h>
 #include <public/hvm/hvm_op.h>
+#include <public/hvm/dm_op.h>
 
 struct hvm_ioreq_page {
     unsigned long gmfn;
@@ -47,7 +48,7 @@ struct hvm_ioreq_vcpu {
     bool_t           pending;
 };
 
-#define NR_IO_RANGE_TYPES (HVMOP_IO_RANGE_PCI + 1)
+#define NR_IO_RANGE_TYPES (XEN_DMOP_IO_RANGE_PCI + 1)
 #define MAX_NR_IO_RANGES  256
 
 struct hvm_ioreq_server {
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 8627a64..dcd407b 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -28,8 +28,166 @@
 
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
+#include "../event_channel.h"
+
+#ifndef uint64_aligned_t
+#define uint64_aligned_t uint64_t
+#endif
+
+/*
+ * IOREQ Servers
+ *
+ * The interface between an I/O emulator and Xen is called an IOREQ Server.
+ * A domain supports a single 'legacy' IOREQ Server which is instantiated if
+ * parameter...
+ *
+ * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
+ * ioreq structures), or...
+ * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
+ * ioreq ring), or...
+ * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
+ * to request buffered I/O emulation).
+ *
+ * The following hypercalls facilitate the creation of IOREQ Servers for
+ * 'secondary' emulators which are invoked to implement port I/O, memory, or
+ * PCI config space ranges which they explicitly register.
+ */
+
+typedef uint16_t ioservid_t;
+
+/*
+ * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
+ *                               secondary emulator.
+ *
+ * The <id> handed back is unique for the target domain. The value of
+ * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
+ * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
+ * ioreq ring will not be allocated and hence all emulation requests to
+ * this server will be synchronous.
+ */
+#define XEN_DMOP_create_ioreq_server 1
+
+struct xen_dm_op_create_ioreq_server {
+    /* IN - should server handle buffered ioreqs */
+    uint8_t handle_bufioreq;
+    uint8_t pad[3];
+    /* OUT - server id */
+    ioservid_t id;
+};
+
+/*
+ * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
+ *                                 access IOREQ Server <id>.
+ *
+ * The emulator needs to map the synchronous ioreq structures and buffered
+ * ioreq ring (if it exists) that Xen uses to request emulation. These are
+ * hosted in the target domain's gmfns <ioreq_pfn> and <bufioreq_pfn>
+ * respectively. In addition, if the IOREQ Server is handling buffered
+ * emulation requests, the emulator needs to bind to event channel
+ * <bufioreq_port> to listen for them. (The event channels used for
+ * synchronous emulation requests are specified in the per-CPU ioreq
+ * structures in <ioreq_pfn>).
+ * If the IOREQ Server is not handling buffered emulation requests then the
+ * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
+ */
+#define XEN_DMOP_get_ioreq_server_info 2
+
+struct xen_dm_op_get_ioreq_server_info {
+    /* IN - server id */
+    ioservid_t id;
+    uint16_t pad;
+    /* OUT - buffered ioreq port */
+    evtchn_port_t bufioreq_port;
+    /* OUT - sync ioreq pfn */
+    uint64_aligned_t ioreq_pfn;
+    /* OUT - buffered ioreq pfn */
+    uint64_aligned_t bufioreq_pfn;
+};
+
+/*
+ * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
+ *                                        emulation by the client of
+ *                                        IOREQ Server <id>.
+ * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
+ *                                            previously registered for
+ *                                            emulation by the client of
+ *                                            IOREQ Server <id>.
+ *
+ * There are three types of I/O that can be emulated: port I/O, memory
+ * accesses and PCI config space accesses. The <type> field denotes which
+ * type of range the <start> and <end> (inclusive) fields are specifying.
+ * PCI config space ranges are specified by segment/bus/device/function
+ * values which should be encoded using the XEN_DMOP_PCI_SBDF helper macro
+ * below.
+ *
+ * NOTE: unless an emulation request falls entirely within a range mapped
+ * by a secondary emulator, it will not be passed to that emulator.
+ */
+#define XEN_DMOP_map_io_range_to_ioreq_server 3
+#define XEN_DMOP_unmap_io_range_from_ioreq_server 4
+
+struct xen_dm_op_ioreq_server_range {
+    /* IN - server id */
+    ioservid_t id;
+    uint16_t pad;
+    /* IN - type of range */
+    uint32_t type;
+# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
+# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
+# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
+    /* IN - inclusive start and end of range */
+    uint64_aligned_t start, end;
+};
+
+#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
+       ((((s) & 0xffff) << 16) |  \
+        (((b) & 0xff) << 8) |     \
+        (((d) & 0x1f) << 3) |     \
+        ((f) & 0x07))
+
+/*
+ * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
+ *
+ * The IOREQ Server will not be passed any emulation requests until it is
+ * in the enabled state.
+ * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
+ * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
+ * is in the enabled state.
+ */
+#define XEN_DMOP_set_ioreq_server_state 5
+
+struct xen_dm_op_set_ioreq_server_state {
+    /* IN - server id */
+    ioservid_t id;
+    /* IN - enabled? */
+    uint8_t enabled;
+    uint8_t pad;
+};
+
+/*
+ * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
+ *
+ * Any registered I/O ranges will be automatically deregistered.
+ */
+#define XEN_DMOP_destroy_ioreq_server 6
+
+struct xen_dm_op_destroy_ioreq_server {
+    /* IN - server id */
+    ioservid_t id;
+    uint16_t pad;
+};
+
 struct xen_dm_op {
     uint32_t op;
+    uint32_t pad;
+    union {
+        struct xen_dm_op_create_ioreq_server create_ioreq_server;
+        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
+        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
+        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
+        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
+        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
+    } u;
 };
 
 #endif /* __XEN__ || __XEN_TOOLS__ */
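To make the new public layout concrete: an emulator registers an I/O
range by filling one union member and issuing the op. A hedged sketch
(the server id and MMIO addresses are placeholders; the libxc wrapper
above does exactly this on the caller's behalf):

    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    memset(&op, 0, sizeof(op));
    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
    data = &op.u.map_io_range_to_ioreq_server;

    data->id    = id;                       /* from create_ioreq_server */
    data->type  = XEN_DMOP_IO_RANGE_MEMORY; /* MMIO, not port/PCI       */
    data->start = 0xfe000000;               /* inclusive bounds;        */
    data->end   = 0xfe000fff;               /* placeholder page         */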
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index b3e45cf..6fcd86d 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -243,37 +243,10 @@ typedef struct xen_hvm_inject_msi xen_hvm_inject_msi_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_msi_t);
 
 /*
- * IOREQ Servers
- *
- * The interface between an I/O emulator an Xen is called an IOREQ Server.
- * A domain supports a single 'legacy' IOREQ Server which is instantiated if
- * parameter...
- *
- * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
- * ioreq structures), or...
- * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
- * ioreq ring), or...
- * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
- * to request buffered I/O emulation).
- * 
- * The following hypercalls facilitate the creation of IOREQ Servers for
- * 'secondary' emulators which are invoked to implement port I/O, memory, or
- * PCI config space ranges which they explicitly register.
+ * Definitions relating to XEN_DMOP_create_ioreq_server. (Defined here for
+ * backwards compatibility).
  */
 
-typedef uint16_t ioservid_t;
-
-/*
- * HVMOP_create_ioreq_server: Instantiate a new IOREQ Server for a secondary
- *                            emulator servicing domain <domid>.
- *
- * The <id> handed back is unique for <domid>. If <handle_bufioreq> is zero
- * the buffered ioreq ring will not be allocated and hence all emulation
- * requestes to this server will be synchronous.
- */
-#define HVMOP_create_ioreq_server 17
-struct xen_hvm_create_ioreq_server {
-    domid_t domid;           /* IN - domain to be serviced */
 #define HVM_IOREQSRV_BUFIOREQ_OFF    0
 #define HVM_IOREQSRV_BUFIOREQ_LEGACY 1
 /*
@@ -281,107 +254,6 @@ struct xen_hvm_create_ioreq_server {
  * the pointer pair gets read atomically:
  */
 #define HVM_IOREQSRV_BUFIOREQ_ATOMIC 2
-    uint8_t handle_bufioreq; /* IN - should server handle buffered ioreqs */
-    ioservid_t id;           /* OUT - server id */
-};
-typedef struct xen_hvm_create_ioreq_server xen_hvm_create_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_create_ioreq_server_t);
-
-/*
- * HVMOP_get_ioreq_server_info: Get all the information necessary to access
- *                              IOREQ Server <id>. 
- *
- * The emulator needs to map the synchronous ioreq structures and buffered
- * ioreq ring (if it exists) that Xen uses to request emulation. These are
- * hosted in domain <domid>'s gmfns <ioreq_pfn> and <bufioreq_pfn>
- * respectively. In addition, if the IOREQ Server is handling buffered
- * emulation requests, the emulator needs to bind to event channel
- * <bufioreq_port> to listen for them. (The event channels used for
- * synchronous emulation requests are specified in the per-CPU ioreq
- * structures in <ioreq_pfn>).
- * If the IOREQ Server is not handling buffered emulation requests then the
- * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
- */
-#define HVMOP_get_ioreq_server_info 18
-struct xen_hvm_get_ioreq_server_info {
-    domid_t domid;                 /* IN - domain to be serviced */
-    ioservid_t id;                 /* IN - server id */
-    evtchn_port_t bufioreq_port;   /* OUT - buffered ioreq port */
-    uint64_aligned_t ioreq_pfn;    /* OUT - sync ioreq pfn */
-    uint64_aligned_t bufioreq_pfn; /* OUT - buffered ioreq pfn */
-};
-typedef struct xen_hvm_get_ioreq_server_info xen_hvm_get_ioreq_server_info_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_get_ioreq_server_info_t);
-
-/*
- * HVM_map_io_range_to_ioreq_server: Register an I/O range of domain <domid>
- *                                   for emulation by the client of IOREQ
- *                                   Server <id>
- * HVM_unmap_io_range_from_ioreq_server: Deregister an I/O range of <domid>
- *                                       for emulation by the client of IOREQ
- *                                       Server <id>
- *
- * There are three types of I/O that can be emulated: port I/O, memory accesses
- * and PCI config space accesses. The <type> field denotes which type of range
- * the <start> and <end> (inclusive) fields are specifying.
- * PCI config space ranges are specified by segment/bus/device/function values
- * which should be encoded using the HVMOP_PCI_SBDF helper macro below.
- *
- * NOTE: unless an emulation request falls entirely within a range mapped
- * by a secondary emulator, it will not be passed to that emulator.
- */
-#define HVMOP_map_io_range_to_ioreq_server 19
-#define HVMOP_unmap_io_range_from_ioreq_server 20
-struct xen_hvm_io_range {
-    domid_t domid;               /* IN - domain to be serviced */
-    ioservid_t id;               /* IN - server id */
-    uint32_t type;               /* IN - type of range */
-# define HVMOP_IO_RANGE_PORT   0 /* I/O port range */
-# define HVMOP_IO_RANGE_MEMORY 1 /* MMIO range */
-# define HVMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
-    uint64_aligned_t start, end; /* IN - inclusive start and end of range */
-};
-typedef struct xen_hvm_io_range xen_hvm_io_range_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_io_range_t);
-
-#define HVMOP_PCI_SBDF(s,b,d,f)                 \
-       ((((s) & 0xffff) << 16) |                   \
-        (((b) & 0xff) << 8) |                      \
-        (((d) & 0x1f) << 3) |                      \
-        ((f) & 0x07))
-
-/*
- * HVMOP_destroy_ioreq_server: Destroy the IOREQ Server <id> servicing domain
- *                             <domid>.
- *
- * Any registered I/O ranges will be automatically deregistered.
- */
-#define HVMOP_destroy_ioreq_server 21
-struct xen_hvm_destroy_ioreq_server {
-    domid_t domid; /* IN - domain to be serviced */
-    ioservid_t id; /* IN - server id */
-};
-typedef struct xen_hvm_destroy_ioreq_server xen_hvm_destroy_ioreq_server_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_destroy_ioreq_server_t);
-
-/*
- * HVMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id> servicing
- *                               domain <domid>.
- *
- * The IOREQ Server will not be passed any emulation requests until it is in the
- * enabled state.
- * Note that the contents of the ioreq_pfn and bufioreq_fn (see
- * HVMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server is in
- * the enabled state.
- */
-#define HVMOP_set_ioreq_server_state 22
-struct xen_hvm_set_ioreq_server_state {
-    domid_t domid;   /* IN - domain to be serviced */
-    ioservid_t id;   /* IN - server id */
-    uint8_t enabled; /* IN - enabled? */    
-};
-typedef struct xen_hvm_set_ioreq_server_state xen_hvm_set_ioreq_server_state_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_ioreq_server_state_t);
 
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 039a39c..6b48290 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -57,6 +57,11 @@
 ?      grant_entry_v2                  grant_table.h
 ?      gnttab_swap_grant_ref           grant_table.h
 !      dm_op_buf                       hvm/dm_op.h
+?      dm_op_create_ioreq_server       hvm/dm_op.h
+?      dm_op_destroy_ioreq_server      hvm/dm_op.h
+?      dm_op_get_ioreq_server_info     hvm/dm_op.h
+?      dm_op_ioreq_server_range        hvm/dm_op.h
+?      dm_op_set_ioreq_server_state    hvm/dm_op.h
 ?      vcpu_hvm_context                hvm/hvm_vcpu.h
 ?      vcpu_hvm_x86_32                 hvm/hvm_vcpu.h
 ?      vcpu_hvm_x86_64                 hvm/hvm_vcpu.h
diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h
index b206f5a..9f7c174 100644
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -634,12 +634,6 @@ static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_ioreq_server(XSM_DEFAULT_ARG struct domain *d, int op)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
-static XSM_INLINE int xsm_mem_sharing_op(XSM_DEFAULT_ARG struct domain *d, struct domain *cd, int op)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h
index e2d336f..b5845a2 100644
--- a/xen/include/xsm/xsm.h
+++ b/xen/include/xsm/xsm.h
@@ -166,7 +166,6 @@ struct xsm_operations {
     int (*hvm_set_isa_irq_level) (struct domain *d);
     int (*hvm_set_pci_link_route) (struct domain *d);
     int (*hvm_inject_msi) (struct domain *d);
-    int (*hvm_ioreq_server) (struct domain *d, int op);
     int (*mem_sharing_op) (struct domain *d, struct domain *cd, int op);
     int (*apic) (struct domain *d, int cmd);
     int (*memtype) (uint32_t access);
@@ -656,11 +655,6 @@ static inline int xsm_hvm_inject_msi (xsm_default_t def, struct domain *d)
     return xsm_ops->hvm_inject_msi(d);
 }
 
-static inline int xsm_hvm_ioreq_server (xsm_default_t def, struct domain *d, int op)
-{
-    return xsm_ops->hvm_ioreq_server(d, op);
-}
-
-static inline int xsm_mem_sharing_op (xsm_default_t def, struct domain *d, struct domain *cd, int op)
 {
     return xsm_ops->mem_sharing_op(d, cd, op);
diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c
index a082b28..d544ec1 100644
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -149,7 +149,6 @@ void __init xsm_fixup_ops (struct xsm_operations *ops)
     set_to_dummy_if_null(ops, hvm_set_isa_irq_level);
     set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
-    set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
     set_to_dummy_if_null(ops, apic);
     set_to_dummy_if_null(ops, machine_memory_map);
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 383dd4f..1ce8a36 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1525,11 +1525,6 @@ static int flask_hvm_inject_msi(struct domain *d)
     return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
 }
 
-static int flask_hvm_ioreq_server(struct domain *d, int op)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__HVMCTL);
-}
-
 static int flask_mem_sharing_op(struct domain *d, struct domain *cd, int op)
 {
     int rc = current_has_perm(cd, SECCLASS_HVM, HVM__MEM_SHARING);
@@ -1808,7 +1803,6 @@ static struct xsm_operations flask_ops = {
     .hvm_set_isa_irq_level = flask_hvm_set_isa_irq_level,
     .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
-    .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
     .apic = flask_apic,
     .machine_memory_map = flask_machine_memory_map,
--
generated by git-patchbot for /home/xen/git/xen.git#master
