[Xen-devel] [PATCH v6 3/8] dm_op: convert HVMOP_track_dirty_vram
The handle type passed to the underlying shadow and hap functions is
changed for compatibility with the new hypercall buffer.

NOTE: This patch also modifies the type of the 'nr' parameter of
      xc_hvm_track_dirty_vram() from uint64_t to uint32_t. In practice
      the value passed was always truncated to 32 bits.

Suggested-by: Jan Beulich <jbeulich@xxxxxxxx>
Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Daniel De Graaf <dgdegra@xxxxxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
Cc: Ian Jackson <ian.jackson@xxxxxxxxxxxxx>

v4:
- Knock-on changes from compat code in dm.c. Not adding Jan's R-b since
  the patch has fundamentally changed.

v3:
- Check d->max_vcpus rather than d->vcpu, as requested by Jan.
- The handle type changes (from uint8 to void) are still necessary, hence
  omitting Jan's R-b until this is confirmed to be acceptable.

v2:
- Addressed several comments from Jan.
---
 tools/flask/policy/modules/xen.if   |  4 ++--
 tools/libxc/include/xenctrl.h       |  2 +-
 tools/libxc/xc_misc.c               | 32 +++++++++--------------------
 xen/arch/x86/hvm/dm.c               | 40 +++++++++++++++++++++++++++++++++++-
 xen/arch/x86/hvm/hvm.c              | 41 -------------------------------------
 xen/arch/x86/mm/hap/hap.c           |  2 +-
 xen/arch/x86/mm/shadow/common.c     |  2 +-
 xen/include/asm-x86/hap.h           |  2 +-
 xen/include/asm-x86/shadow.h        |  2 +-
 xen/include/public/hvm/dm_op.h      | 18 ++++++++++++++++
 xen/include/public/hvm/hvm_op.h     | 16 ---------------
 xen/xsm/flask/hooks.c               |  3 ---
 xen/xsm/flask/policy/access_vectors |  2 --
 13 files changed, 74 insertions(+), 92 deletions(-)

diff --git a/tools/flask/policy/modules/xen.if b/tools/flask/policy/modules/xen.if
index f9254c2..45e5b5f 100644
--- a/tools/flask/policy/modules/xen.if
+++ b/tools/flask/policy/modules/xen.if
@@ -58,7 +58,7 @@ define(`create_domain_common', `
 	allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op updatemp };
 	allow $1 $2:grant setup;
 	allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc
-			setparam pcilevel trackdirtyvram nested altp2mhvm altp2mhvm_op send_irq };
+			setparam pcilevel nested altp2mhvm altp2mhvm_op send_irq };
 ')

 # create_domain(priv, target)
@@ -151,7 +151,7 @@ define(`device_model', `

 	allow $1 $2_target:domain { getdomaininfo shutdown };
 	allow $1 $2_target:mmu { map_read map_write adjust physmap target_hack };
-	allow $1 $2_target:hvm { getparam setparam trackdirtyvram hvmctl irqlevel pciroute pcilevel cacheattr send_irq dm };
+	allow $1 $2_target:hvm { getparam setparam hvmctl irqlevel pciroute pcilevel cacheattr send_irq dm };
 ')

 # make_device_model(priv, dm_dom, hvm_dom)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 2ba46d7..c7ee412 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1620,7 +1620,7 @@ int xc_hvm_inject_msi(
  */
 int xc_hvm_track_dirty_vram(
     xc_interface *xch, domid_t dom,
-    uint64_t first_pfn, uint64_t nr,
+    uint64_t first_pfn, uint32_t nr,
     unsigned long *bitmap);

 /*
diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 06e90de..4c41d41 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -581,34 +581,22 @@ int xc_hvm_inject_msi(
 int xc_hvm_track_dirty_vram(
     xc_interface *xch, domid_t dom,
-    uint64_t first_pfn, uint64_t nr,
+    uint64_t first_pfn, uint32_t nr,
     unsigned long *dirty_bitmap)
 {
-    DECLARE_HYPERCALL_BOUNCE(dirty_bitmap, (nr+7) / 8, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
-    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_track_dirty_vram, arg);
-    int rc;
+    struct xen_dm_op op;
+    struct xen_dm_op_track_dirty_vram *data;

-    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
-    if ( arg == NULL || xc_hypercall_bounce_pre(xch, dirty_bitmap) )
-    {
-        PERROR("Could not bounce memory for xc_hvm_track_dirty_vram hypercall");
-        rc = -1;
-        goto out;
-    }
+    memset(&op, 0, sizeof(op));

-    arg->domid = dom;
-    arg->first_pfn = first_pfn;
-    arg->nr = nr;
-    set_xen_guest_handle(arg->dirty_bitmap, dirty_bitmap);
+    op.op = XEN_DMOP_track_dirty_vram;
+    data = &op.u.track_dirty_vram;

-    rc = xencall2(xch->xcall, __HYPERVISOR_hvm_op,
-                  HVMOP_track_dirty_vram,
-                  HYPERCALL_BUFFER_AS_ARG(arg));
+    data->first_pfn = first_pfn;
+    data->nr = nr;

-out:
-    xc_hypercall_buffer_free(xch, arg);
-    xc_hypercall_bounce_post(xch, dirty_bitmap);
-    return rc;
+    return do_dm_op(xch, dom, 2, &op, sizeof(op),
+                    dirty_bitmap, (nr + 7) / 8);
 }

 int xc_hvm_modified_memory(
diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c
index 72cb33a..b0908f4 100644
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -18,7 +18,9 @@
 #include <xen/hypercall.h>
 #include <xen/sched.h>

+#include <asm/hap.h>
 #include <asm/hvm/ioreq.h>
+#include <asm/shadow.h>

 #include <xsm/xsm.h>

@@ -42,6 +44,26 @@ static bool copy_buf_to_guest(xen_dm_op_buf_t bufs[],
     return !copy_to_guest(bufs[idx].h, src, bufs[idx].size);
 }

+static int track_dirty_vram(struct domain *d, xen_pfn_t first_pfn,
+                            unsigned int nr, struct xen_dm_op_buf *buf)
+{
+    if ( nr > (GB(1) >> PAGE_SHIFT) )
+        return -EINVAL;
+
+    if ( d->is_dying )
+        return -ESRCH;
+
+    if ( !d->max_vcpus || !d->vcpu[0] )
+        return -EINVAL;
+
+    if ( ((nr + 7) / 8) > buf->size )
+        return -EINVAL;
+
+    return shadow_mode_enabled(d) ?
+        shadow_track_dirty_vram(d, first_pfn, nr, buf->h) :
+        hap_track_dirty_vram(d, first_pfn, nr, buf->h);
+}
+
 static int dm_op(domid_t domid,
                  unsigned int nr_bufs,
                  xen_dm_op_buf_t bufs[])
@@ -163,6 +185,22 @@ static int dm_op(domid_t domid,
         break;
     }

+    case XEN_DMOP_track_dirty_vram:
+    {
+        const struct xen_dm_op_track_dirty_vram *data =
+            &op.u.track_dirty_vram;
+
+        rc = -EINVAL;
+        if ( data->pad )
+            break;
+
+        if ( nr_bufs < 2 )
+            break;
+
+        rc = track_dirty_vram(d, data->first_pfn, data->nr, &bufs[1]);
+        break;
+    }
+
     default:
         rc = -EOPNOTSUPP;
         break;
@@ -179,7 +217,7 @@ static int dm_op(domid_t domid,
     return rc;
 }

-#define MAX_NR_BUFS 1
+#define MAX_NR_BUFS 2

 int compat_dm_op(domid_t domid,
                  unsigned int nr_bufs,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index fd0e18a..5a5b772 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -5134,47 +5134,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         rc = guest_handle_is_null(arg) ? hvmop_flush_tlb_all() : -EINVAL;
         break;

-    case HVMOP_track_dirty_vram:
-    {
-        struct xen_hvm_track_dirty_vram a;
-        struct domain *d;
-
-        if ( copy_from_guest(&a, arg, 1) )
-            return -EFAULT;
-
-        rc = rcu_lock_remote_domain_by_id(a.domid, &d);
-        if ( rc != 0 )
-            return rc;
-
-        rc = -EINVAL;
-        if ( !is_hvm_domain(d) )
-            goto tdv_fail;
-
-        if ( a.nr > GB(1) >> PAGE_SHIFT )
-            goto tdv_fail;
-
-        rc = xsm_hvm_control(XSM_DM_PRIV, d, op);
-        if ( rc )
-            goto tdv_fail;
-
-        rc = -ESRCH;
-        if ( d->is_dying )
-            goto tdv_fail;
-
-        rc = -EINVAL;
-        if ( d->vcpu == NULL || d->vcpu[0] == NULL )
-            goto tdv_fail;
-
-        if ( shadow_mode_enabled(d) )
-            rc = shadow_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
-        else
-            rc = hap_track_dirty_vram(d, a.first_pfn, a.nr, a.dirty_bitmap);
-
-    tdv_fail:
-        rcu_unlock_domain(d);
-        break;
-    }
-
     case HVMOP_modified_memory:
     {
         struct xen_hvm_modified_memory a;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index e6dc088..6dbb3cc 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -68,7 +68,7 @@
 int hap_track_dirty_vram(struct domain *d,
                          unsigned long begin_pfn,
                          unsigned long nr,
-                         XEN_GUEST_HANDLE_64(uint8) guest_dirty_bitmap)
+                         XEN_GUEST_HANDLE_PARAM(void) guest_dirty_bitmap)
 {
     long rc = 0;
     struct sh_dirty_vram *dirty_vram;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 4113351..0079238 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3604,7 +3604,7 @@ static void sh_clean_dirty_bitmap(struct domain *d)
 int shadow_track_dirty_vram(struct domain *d,
                             unsigned long begin_pfn,
                             unsigned long nr,
-                            XEN_GUEST_HANDLE_64(uint8) guest_dirty_bitmap)
+                            XEN_GUEST_HANDLE_PARAM(void) guest_dirty_bitmap)
 {
     int rc = 0;
     unsigned long end_pfn = begin_pfn + nr;
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index dedb4b1..88587c4 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -43,7 +43,7 @@ void hap_vcpu_init(struct vcpu *v);
 int   hap_track_dirty_vram(struct domain *d,
                            unsigned long begin_pfn,
                            unsigned long nr,
-                           XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
+                           XEN_GUEST_HANDLE_PARAM(void) dirty_bitmap);

 extern const struct paging_mode *hap_paging_get_mode(struct vcpu *);
 int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index bac952f..7e1ed3b 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -63,7 +63,7 @@ int shadow_enable(struct domain *d, u32 mode);
 int shadow_track_dirty_vram(struct domain *d,
                             unsigned long first_pfn,
                             unsigned long nr,
-                            XEN_GUEST_HANDLE_64(uint8) dirty_bitmap);
+                            XEN_GUEST_HANDLE_PARAM(void) dirty_bitmap);

 /* Handler for shadow control ops: operations from user-space to enable
  * and disable ephemeral shadow modes (test mode and log-dirty mode) and
diff --git a/xen/include/public/hvm/dm_op.h b/xen/include/public/hvm/dm_op.h
index 980322f..5b8b0b4 100644
--- a/xen/include/public/hvm/dm_op.h
+++ b/xen/include/public/hvm/dm_op.h
@@ -176,6 +176,23 @@ struct xen_dm_op_destroy_ioreq_server {
     uint16_t pad;
 };

+/*
+ * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
+ *                            range.
+ *
+ * NOTE: The bitmap passed back to the caller is passed in a
+ *       secondary buffer.
+ */
+#define XEN_DMOP_track_dirty_vram 7
+
+struct xen_dm_op_track_dirty_vram {
+    /* IN - number of pages to be tracked */
+    uint32_t nr;
+    uint32_t pad;
+    /* IN - first pfn to track */
+    uint64_aligned_t first_pfn;
+};
+
 struct xen_dm_op {
     uint32_t op;
     uint32_t pad;
@@ -186,6 +203,7 @@ struct xen_dm_op {
         struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
         struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
         struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
+        struct xen_dm_op_track_dirty_vram track_dirty_vram;
     } u;
 };

diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 6fcd86d..47e836c 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -95,22 +95,6 @@ typedef enum {
 /* Following tools-only interfaces may change in future. */
 #if defined(__XEN__) || defined(__XEN_TOOLS__)

-/* Track dirty VRAM. */
-#define HVMOP_track_dirty_vram 6
-struct xen_hvm_track_dirty_vram {
-    /* Domain to be tracked. */
-    domid_t domid;
-    /* Number of pages to track. */
-    uint32_t nr;
-    /* First pfn to track. */
-    uint64_aligned_t first_pfn;
-    /* OUT variable. */
-    /* Dirty bitmap buffer. */
-    XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
-};
-typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
-
 /* Notify that some pages got modified by the Device Model. */
 #define HVMOP_modified_memory 7
 struct xen_hvm_modified_memory {
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 1ce8a36..a4272d7 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -1180,9 +1180,6 @@ static int flask_hvm_param(struct domain *d, unsigned long op)
     case HVMOP_get_param:
         perm = HVM__GETPARAM;
         break;
-    case HVMOP_track_dirty_vram:
-        perm = HVM__TRACKDIRTYVRAM;
-        break;
     default:
         perm = HVM__HVMCTL;
     }
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index 92e6da9..47ce589 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -268,8 +268,6 @@ class hvm
     bind_irq
 # XEN_DOMCTL_pin_mem_cacheattr
     cacheattr
-# HVMOP_track_dirty_vram
-    trackdirtyvram
 # HVMOP_modified_memory, HVMOP_get_mem_type, HVMOP_set_mem_type,
 # HVMOP_set_mem_access, HVMOP_get_mem_access, HVMOP_pagetable_dying,
 # HVMOP_inject_trap
--
2.1.4
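For illustration only (not part of the patch): a minimal sketch of how a
toolstack or device model might call the reworked libxc wrapper once this
series is applied. The domid, first pfn and page count below are placeholder
values, and error handling is kept deliberately simple.

/* Minimal usage sketch for the reworked xc_hvm_track_dirty_vram(). */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#include <xenctrl.h>

int main(void)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    domid_t dom = 1;                /* placeholder domid */
    uint64_t first_pfn = 0xf0000;   /* placeholder start of the VRAM range */
    uint32_t nr = 2048;             /* 8MB of VRAM in 4k pages */
    size_t bitmap_bytes = (nr + 7) / 8;
    unsigned long *bitmap;
    unsigned int i;
    int rc;

    if ( !xch )
        return 1;

    /* Round the buffer up to whole longs so it is easy to scan. */
    bitmap = calloc((bitmap_bytes + sizeof(unsigned long) - 1) /
                    sizeof(unsigned long), sizeof(unsigned long));
    if ( !bitmap )
    {
        xc_interface_close(xch);
        return 1;
    }

    /* 'nr' is now uint32_t; the dirty bitmap comes back via the buffer. */
    rc = xc_hvm_track_dirty_vram(xch, dom, first_pfn, nr, bitmap);
    if ( rc < 0 )
        perror("xc_hvm_track_dirty_vram");
    else
        for ( i = 0; i < nr; i++ )
            if ( bitmap[i / (8 * sizeof(unsigned long))] &
                 (1UL << (i % (8 * sizeof(unsigned long)))) )
                printf("pfn %#" PRIx64 " is dirty\n",
                       (uint64_t)(first_pfn + i));

    free(bitmap);
    xc_interface_close(xch);

    return rc < 0 ? 1 : 0;
}

The caller only supplies an ordinary user buffer of (nr + 7) / 8 bytes; libxc
passes it as the second dm_op buffer when issuing XEN_DMOP_track_dirty_vram,
as shown in the xc_misc.c hunk above.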