
[Xen-devel] [PATCH 05/11] hvmctl: convert HVMOP_track_dirty_vram

Also limiting "nr" at the libxc level to 32 bits (the high 32 bits of
the previous 64-bit parameter got ignored so far).

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

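For illustration, a minimal caller sketch against the reworked libxc
signature (not part of the patch; the dump_dirty_vram() helper, the VRAM
base GFN, and the page count below are assumptions made up for the
example):

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <xenctrl.h>

/* Print which VRAM pages were dirtied since the previous invocation. */
static int dump_dirty_vram(xc_interface *xch, domid_t dom)
{
    const uint64_t first_gfn = 0xff000;   /* assumed VRAM base GFN */
    const uint32_t nr = 2048;             /* assumed VRAM size in pages */
    const unsigned int bits = 8 * sizeof(unsigned long);
    unsigned long *bitmap = calloc((nr + bits - 1) / bits,
                                   sizeof(unsigned long));
    uint32_t i;
    int rc;

    if ( !bitmap )
        return -1;

    /* One bit per tracked page; "nr" is now deliberately 32 bits wide. */
    rc = xc_hvm_track_dirty_vram(xch, dom, first_gfn, nr, bitmap);
    if ( rc == 0 )
        for ( i = 0; i < nr; i++ )
            if ( bitmap[i / bits] & (1UL << (i % bits)) )
                printf("gfn %#" PRIx64 " dirty\n", first_gfn + i);

    free(bitmap);
    return rc;
}
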
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1613,7 +1613,7 @@ int xc_hvm_inject_msi(
  */
 int xc_hvm_track_dirty_vram(
     xc_interface *xch, domid_t dom,
-    uint64_t first_pfn, uint64_t nr,
+    uint64_t first_gfn, uint32_t nr,
     unsigned long *bitmap);
 
 /*
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -533,33 +533,27 @@ int xc_hvm_inject_msi(
 
 int xc_hvm_track_dirty_vram(
     xc_interface *xch, domid_t dom,
-    uint64_t first_pfn, uint64_t nr,
+    uint64_t first_gfn, uint32_t nr,
     unsigned long *dirty_bitmap)
 {
+    DECLARE_HVMCTL(track_dirty_vram, dom,
+                   .first_gfn = first_gfn,
+                   .nr        = nr);
     DECLARE_HYPERCALL_BOUNCE(dirty_bitmap, (nr+7) / 8, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_track_dirty_vram, arg);
     int rc;
 
-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL || xc_hypercall_bounce_pre(xch, dirty_bitmap) )
+    if ( xc_hypercall_bounce_pre(xch, dirty_bitmap) )
     {
         PERROR("Could not bounce memory for xc_hvm_track_dirty_vram 
hypercall");
-        rc = -1;
-        goto out;
+        return -1;
     }
 
-    arg->domid     = dom;
-    arg->first_pfn = first_pfn;
-    arg->nr        = nr;
-    set_xen_guest_handle(arg->dirty_bitmap, dirty_bitmap);
-
-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_track_dirty_vram,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    set_xen_guest_handle(hvmctl.u.track_dirty_vram.dirty_bitmap, dirty_bitmap);
+
+    rc = do_hvmctl(xch, &hvmctl);
 
-out:
-    xc_hypercall_buffer_free(xch, arg);
     xc_hypercall_bounce_post(xch, dirty_bitmap);
+
     return rc;
 }
 
--- a/xen/arch/x86/hvm/control.c
+++ b/xen/arch/x86/hvm/control.c
@@ -17,6 +17,8 @@
 #include <xen/hypercall.h>
 #include <xen/guest_access.h>
 #include <xen/sched.h>
+#include <asm/hap.h>
+#include <asm/shadow.h>
 #include <xsm/xsm.h>
 
 static int set_pci_intx_level(struct domain *d,
@@ -67,6 +69,27 @@ static int set_isa_irq_level(struct doma
     return 0;
 }
 
+static int track_dirty_vram(struct domain *d,
+                            const struct xen_hvm_track_dirty_vram *op)
+{
+    if ( !is_hvm_domain(d) )
+        return -EINVAL;
+
+    if ( op->rsvd || op->nr > (GB(1) >> PAGE_SHIFT) )
+        return -EINVAL;
+
+    if ( d->is_dying )
+        return -ESRCH;
+
+    if ( !d->max_vcpus || !d->vcpu[0] )
+        return -EINVAL;
+
+    return shadow_mode_enabled(d)
+           ? shadow_track_dirty_vram(d, op->first_gfn, op->nr,
+                                     op->dirty_bitmap)
+           : hap_track_dirty_vram(d, op->first_gfn, op->nr, op->dirty_bitmap);
+}
+
 /*
  * Note that this value is effectively part of the ABI, even if we don't need
  * to make it a formal part of it.  Hence this value may only be changed if
@@ -125,6 +148,10 @@ long do_hvmctl(XEN_GUEST_HANDLE_PARAM(xe
                                     op.u.set_pci_link_route.isa_irq);
         break;
 
+    case XEN_HVMCTL_track_dirty_vram:
+        rc = track_dirty_vram(d, &op.u.track_dirty_vram);
+        break;
+
     default:
         rc = -EOPNOTSUPP;
         break;
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5296,47 +5296,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -ENOSYS;
         break;
 
-    case HVMOP_track_dirty_vram:
-    {
-        struct xen_hvm_track_dirty_vram a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto tdv_fail;
-
-        if ( a.nr > GB(1) >> PAGE_SHIFT )
-            goto tdv_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto tdv_fail;
-
-        rc = -ESRCH;
-        if ( d->is_dying )
-            goto tdv_fail;
-
-        rc = -EINVAL;
-        if ( d->vcpu == NULL || d->vcpu[0] == NULL )
-            goto tdv_fail;
-
-        if ( shadow_mode_enabled(d) )
-            rc = shadow_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
-        else
-            rc = hap_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
-
-    tdv_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_modified_memory:
     {
         struct xen_hvm_modified_memory a;
--- a/xen/include/public/hvm/control.h
+++ b/xen/include/public/hvm/control.h
@@ -55,6 +55,18 @@ struct xen_hvm_set_pci_link_route {
     uint8_t  isa_irq;
 };
 
+/* XEN_HVMCTL_track_dirty_vram */
+struct xen_hvm_track_dirty_vram {
+    /* Number of pages to track. */
+    uint32_t nr;
+    uint32_t rsvd;
+    /* First GFN to track. */
+    uint64_aligned_t first_gfn;
+    /* OUT variable. */
+    /* Dirty bitmap buffer. */
+    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
+};
+
 struct xen_hvmctl {
     uint16_t interface_version;    /* XEN_HVMCTL_INTERFACE_VERSION */
     domid_t domain;
@@ -62,11 +74,13 @@ struct xen_hvmctl {
 #define XEN_HVMCTL_set_pci_intx_level            1
 #define XEN_HVMCTL_set_isa_irq_level             2
 #define XEN_HVMCTL_set_pci_link_route            3
+#define XEN_HVMCTL_track_dirty_vram              4
     uint16_t opaque;               /* Must be zero on initial invocation. */
     union {
         struct xen_hvm_set_pci_intx_level set_pci_intx_level;
         struct xen_hvm_set_isa_irq_level set_isa_irq_level;
         struct xen_hvm_set_pci_link_route set_pci_link_route;
+        struct xen_hvm_track_dirty_vram track_dirty_vram;
         uint8_t pad[120];
     } u;
 };
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -98,22 +98,6 @@ typedef enum {
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)
 
-/* Track dirty VRAM. */
-#define HVMOP_track_dirty_vram    6
-struct xen_hvm_track_dirty_vram {
-    /* Domain to be tracked. */
-    domid_t  domid;
-    /* Number of pages to track. */
-    uint32_t nr;
-    /* First pfn to track. */
-    uint64_aligned_t first_pfn;
-    /* OUT variable. */
-    /* Dirty bitmap buffer. */
-    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
-};
-typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
-
 /* Notify that some pages got modified by the Device Model. */
 #define HVMOP_modified_memory    7
 struct xen_hvm_modified_memory {
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1174,9 +1174,6 @@ static int flask_hvm_param(struct domain
     case HVMOP_get_param:
         perm = HVM__GETPARAM;
         break;
-    case HVMOP_track_dirty_vram:
-        perm = HVM__TRACKDIRTYVRAM;
-        break;
     default:
         perm = HVM__HVMCTL;
     }
@@ -1199,6 +1196,9 @@ static int flask_hvm_control(struct doma
     case XEN_HVMCTL_set_pci_link_route:
         perm = HVM__PCIROUTE;
         break;
+    case XEN_HVMCTL_track_dirty_vram:
+        perm = HVM__TRACKDIRTYVRAM;
+        break;
     default:
         perm = HVM__HVMCTL;
         break;
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -270,7 +270,7 @@ class hvm
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr
     cacheattr
-# HVMOP_track_dirty_vram
+# XEN_HVMCTL_track_dirty_vram
     trackdirtyvram
 # HVMOP_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,


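For scale: the GB(1) >> PAGE_SHIFT bound kept in the new
track_dirty_vram() handler works out to 2^30 / 2^12 = 262144 trackable
pages on x86, i.e. a dirty bitmap of at most 32KiB per invocation, so
narrowing "nr" to 32 bits loses nothing in practice.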