
[Xen-devel] [PATCH v2 04/11] hvmctl: convert HVMOP_set_pci_link_route



Along with the interface conversion, the argument checks move out of the
hvmop wrapper into hvm_set_pci_link_route() itself, which now returns an
error code instead of asserting pre-validated input.

Note that this retains the hvmop interface definitions, as those had
(wrongly) been exposed to non-tool-stack consumers (albeit the operation
wouldn't have succeeded when requested by a domain for itself).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
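
For reference, the tool-stack interface is unchanged by this conversion: a
consumer still calls the same libxc wrapper, which now issues
XEN_HVMCTL_set_pci_link_route via do_hvmctl() instead of the former
HVMOP_set_pci_link_route hypercall. A minimal sketch (domain ID, link, and
IRQ values are illustrative only):

    #include <xenctrl.h>

    /* Illustrative only: route PCI link 0 of an HVM guest to ISA IRQ 10. */
    static int route_link_example(domid_t domid)
    {
        xc_interface *xch = xc_interface_open(NULL, NULL, 0);
        int rc;

        if ( !xch )
            return -1;

        /* Same call as before; the transport is now the hvmctl interface. */
        rc = xc_hvm_set_pci_link_route(xch, domid, 0 /* link */, 10 /* isa_irq */);

        xc_interface_close(xch);
        return rc;
    }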

--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -498,27 +498,11 @@ int xc_hvm_set_isa_irq_level(
 int xc_hvm_set_pci_link_route(
     xc_interface *xch, domid_t dom, uint8_t link, uint8_t isa_irq)
 {
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_pci_link_route, arg);
-    int rc;
+    DECLARE_HVMCTL(set_pci_link_route, dom,
+                   .link    = link,
+                   .isa_irq = isa_irq);
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL )
-    {
-        PERROR("Could not allocate memory for xc_hvm_set_pci_link_route hypercall");
-        return -1;
-    }
-
-    arg->domid   = dom;
-    arg->link    = link;
-    arg->isa_irq = isa_irq;
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_set_pci_link_route,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
-
-    xc_hypercall_buffer_free(xch, arg);
-
-    return rc;
+    return do_hvmctl(xch, &hvmctl);
 }
 
 int xc_hvm_inject_msi(
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -108,6 +108,11 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
         rc = set_isa_irq_level(d, &op.u.set_isa_irq_level);
         break;
 
+    case XEN_HVMCTL_set_pci_link_route:
+        rc = hvm_set_pci_link_route(d, op.u.set_pci_link_route.link,
+                                    op.u.set_pci_link_route.isa_irq);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4446,39 +4446,6 @@ static void hvm_s3_resume(struct domain
     }
 }
 
-static int hvmop_set_pci_link_route(
-    XEN_GUEST_HANDLE_PARAM(xen_hvm_set_pci_link_route_t) uop)
-{
-    struct xen_hvm_set_pci_link_route op;
-    struct domain *d;
-    int rc;
-
-    if ( copy_from_guest(&op, uop, 1) )
-        return -EFAULT;
-
-    if ( (op.link > 3) || (op.isa_irq > 15) )
-        return -EINVAL;
-
-    rc = rcu_lock_remote_domain_by_id(op.domid, &d);
-    if ( rc != 0 )
-        return rc;
-
-    rc = -EINVAL;
-    if ( !is_hvm_domain(d) )
-        goto out;
-
-    rc = xsm_hvm_set_pci_link_route(XSM_DM_PRIV, d);
-    if ( rc )
-        goto out;
-
-    rc = 0;
-    hvm_set_pci_link_route(d, op.link, op.isa_irq);
-
- out:
-    rcu_unlock_domain(d);
-    return rc;
-}
-
 static int hvmop_inject_msi(
     XEN_GUEST_HANDLE_PARAM(xen_hvm_inject_msi_t) uop)
 {
@@ -5325,11 +5292,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
             guest_handle_cast(arg, xen_hvm_inject_msi_t));
         break;
 
-    case HVMOP_set_pci_link_route:
-        rc = hvmop_set_pci_link_route(
-            guest_handle_cast(arg, xen_hvm_set_pci_link_route_t));
-        break;
-
     case HVMOP_flush_tlbs:
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
--- a/xen/arch/x86/hvm/irq.c
+++ b/xen/arch/x86/hvm/irq.c
@@ -229,13 +229,17 @@ void hvm_assert_evtchn_irq(struct vcpu *
         hvm_set_callback_irq_level(v);
 }
 
-void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq)
 {
     struct hvm_irq *hvm_irq = &d->arch.hvm_domain.irq;
     u8 old_isa_irq;
     int i;
 
-    ASSERT((link <= 3) && (isa_irq <= 15));
+    if ( link > 3 || isa_irq > 15 )
+        return -EINVAL;
+
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
 
     spin_lock(&d->arch.hvm_domain.irq_lock);
 
@@ -273,6 +277,8 @@ void hvm_set_pci_link_route(struct domai
 
     dprintk(XENLOG_G_INFO, "Dom%u PCI link %u changed %u -> %u\n",
             d->domain_id, link, old_isa_irq, isa_irq);
+
+    return 0;
 }
 
 int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data)
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -47,16 +47,26 @@ struct xen_hvm_set_isa_irq_level {
     uint8_t  level;
 };
 
+/* XEN_HVMCTL_set_pci_link_route */
+struct xen_hvm_set_pci_link_route {
+    /* PCI link identifier (0-3). */
+    uint8_t  link;
+    /* ISA IRQ (1-15), or 0 (disable link). */
+    uint8_t  isa_irq;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
     uint32_t cmd;
 #define XEN_HVMCTL_set_pci_intx_level            1
 #define XEN_HVMCTL_set_isa_irq_level             2
+#define XEN_HVMCTL_set_pci_link_route            3
     uint64_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         struct xen_hvm_set_isa_irq_level set_isa_irq_level;
+        struct xen_hvm_set_pci_link_route set_pci_link_route;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -66,8 +66,6 @@ struct xen_hvm_set_isa_irq_level {
 typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
 
-#endif
-
 #define HVMOP_set_pci_link_route  4
 struct xen_hvm_set_pci_link_route {
     /* Domain to be updated. */
@@ -80,6 +78,8 @@ struct xen_hvm_set_pci_link_route {
 typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
 
+#endif
+
 /* Flushes all VCPU TLBs: @arg must be NULL. */
 #define HVMOP_flush_tlbs          5
 
--- a/xen/include/xen/hvm/irq.h
+++ b/xen/include/xen/hvm/irq.h
@@ -122,7 +122,7 @@ void hvm_isa_irq_assert(
 void hvm_isa_irq_deassert(
     struct domain *d, unsigned int isa_irq);
 
-void hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
+int hvm_set_pci_link_route(struct domain *d, u8 link, u8 isa_irq);
 
 int hvm_inject_msi(struct domain *d, uint64_t addr, uint32_t data);
 
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -609,12 +609,6 @@ static XSM_INLINE int xsm_shadow_control
     return xsm_default_action(action, current->domain, d);
 }
 
-static XSM_INLINE int xsm_hvm_set_pci_link_route(XSM_DEFAULT_ARG struct domain *d)
-{
-    XSM_ASSERT_ACTION(XSM_DM_PRIV);
-    return xsm_default_action(action, current->domain, d);
-}
-
 static XSM_INLINE int xsm_hvm_inject_msi(XSM_DEFAULT_ARG struct domain *d)
 {
     XSM_ASSERT_ACTION(XSM_DM_PRIV);
--- a/xen/xsm/dummy.c
+++ b/xen/xsm/dummy.c
@@ -145,7 +145,6 @@ void xsm_fixup_ops (struct xsm_operation
 #ifdef CONFIG_X86
     set_to_dummy_if_null(ops, do_mca);
     set_to_dummy_if_null(ops, shadow_control);
-    set_to_dummy_if_null(ops, hvm_set_pci_link_route);
     set_to_dummy_if_null(ops, hvm_inject_msi);
     set_to_dummy_if_null(ops, hvm_ioreq_server);
     set_to_dummy_if_null(ops, mem_sharing_op);
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1196,6 +1196,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_set_isa_irq_level:
         perm = HVM__IRQLEVEL;
         break;
+    case XEN_HVMCTL_set_pci_link_route:
+        perm = HVM__PCIROUTE;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
@@ -1520,11 +1523,6 @@ static int flask_ioport_mapping(struct d
     return flask_ioport_permission(d, start, end, access);
 }
 
-static int flask_hvm_set_pci_link_route(struct domain *d)
-{
-    return current_has_perm(d, SECCLASS_HVM, HVM__PCIROUTE);
-}
-
 static int flask_hvm_inject_msi(struct domain *d)
 {
     return current_has_perm(d, SECCLASS_HVM, HVM__SEND_IRQ);
@@ -1803,7 +1801,6 @@ static struct xsm_operations flask_ops =
 #ifdef CONFIG_X86
     .do_mca = flask_do_mca,
     .shadow_control = flask_shadow_control,
-    .hvm_set_pci_link_route = flask_hvm_set_pci_link_route,
     .hvm_inject_msi = flask_hvm_inject_msi,
     .hvm_ioreq_server = flask_hvm_ioreq_server,
     .mem_sharing_op = flask_mem_sharing_op,
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -265,7 +265,7 @@ class hvm
     pcilevel
 # XEN_HVMCTL_set_isa_irq_level
     irqlevel
-# HVMOP_set_pci_link_route
+# XEN_HVMCTL_set_pci_link_route
     pciroute
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr
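
With the range checks and the is_hvm_domain() test folded into
hvm_set_pci_link_route() itself, the hvmctl dispatcher simply forwards the
function's return value. For illustration, this is roughly how the new
public interface gets populated for this sub-op (a sketch only: the header
path and helper name are assumptions, and in libxc the DECLARE_HVMCTL() /
do_hvmctl() pair takes care of all of this):

    #include <stdint.h>
    #include <xen/hvm/control.h>   /* assumed install path of the new header */

    /* Illustrative only: fill in a XEN_HVMCTL_set_pci_link_route request. */
    static void fill_set_pci_link_route(struct xen_hvmctl *ctl, domid_t domid,
                                        uint8_t link, uint8_t isa_irq)
    {
        ctl->interface_version = XEN_HVMCTL_INTERFACE_VERSION;
        ctl->domain = domid;
        ctl->cmd = XEN_HVMCTL_set_pci_link_route;
        ctl->opaque = 0;                             /* must be zero initially */
        ctl->u.set_pci_link_route.link = link;       /* PCI link 0-3 */
        ctl->u.set_pci_link_route.isa_irq = isa_irq; /* 1-15, or 0 to disable */
    }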


