[Xen-devel] [PATCH v3] x86/hvm: unify HVM and PVH hypercall tables.
Stage one of many in merging PVH and HVM code in the hypervisor.

This exposes a few new hypercalls to HVM guests, all of which were
already available to PVH ones:

 - XENMEM_memory_map / XENMEM_machine_memory_map / XENMEM_machphys_mapping:
   These are basically harmless, if a bit useless to plain HVM.

 - VCPUOP_send_nmi / VCPUOP_initialise / VCPUOP[_is]_up / VCPUOP_down
   This will eventually let HVM guests bring up APs the way PVH ones do.
   For now, the VCPUOP_initialise paths are still gated on is_pvh.

 - VCPUOP_get_physid
   Harmless.

 - __HYPERVISOR_platform_op (XSM_PRIV callers only).

 - __HYPERVISOR_mmuext_op.  The pagetable manipulation MMUEXT ops are
   already denied (or no-ops) to paging_mode_refcounts() domains; the
   baseptr ones are already denied to paging_mode_translate() domains.
   I have restricted MMUEXT[UN]MARK_SUPER to !paging_mode_refcounts()
   domains as well, as I can see no need for them in PVH.  I have also
   restricted MMUEXT_CLEAR_PAGE / MMUEXT_COPY_PAGE to PV domains only,
   as there is no need for them in HVM/PVH ones and they would lead to
   complication with sharing/paging operations.  That leaves TLB and
   cache flush operations, which are OK.

 - PHYSDEVOP_* (only for hardware control domains).  Also make ops that
   touch PV vcpu state (PHYSDEVOP_set_iopl and PHYSDEVOP_set_iobitmap)
   gate on is_pv rather than !is_pvh.

PVH guests can also see a few hypercalls that they couldn't before,
but which were already available to HVM guests:

 - __HYPERVISOR_set_timer_op
 - __HYPERVISOR_tmem_op

Signed-off-by: Tim Deegan <tim@xxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Mukesh Rathor <mukesh.rathor@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
---
v2: restrict MMUEXT_CLEAR_PAGE / MMUEXT_COPY_PAGE; reword to correctly
    state that some MMUEXT ops are no-ops; and use if() rather than
    one-clause switch() in a few places.
v3: use COMPAT_CALL() for 32bit platform op.
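For reference, HYPERCALL() and COMPAT_CALL() in the tables below are the
existing table-entry helpers in hvm.c, pointing a slot at the native do_*
or 32-bit compat_* handler.  Roughly (a sketch; check hvm.c for the exact
definitions):

#define HYPERCALL(x)                                        \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) do_ ## x
#define COMPAT_CALL(x)                                      \
    [ __HYPERVISOR_ ## x ] = (hvm_hypercall_t *) compat_ ## x

/* so e.g. HYPERCALL(vcpu_op) / COMPAT_CALL(vcpu_op) expand to: */
    [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *) do_vcpu_op,
    [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *) compat_vcpu_op,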
---
 xen/arch/x86/hvm/hvm.c          | 129 +++++++---------------------------------
 xen/arch/x86/mm.c               |  24 ++++++++
 xen/arch/x86/physdev.c          |   4 +-
 xen/arch/x86/x86_64/compat/mm.c |   2 -
 xen/include/asm-x86/hypercall.h |  10 ++++
 5 files changed, 57 insertions(+), 112 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index b69f164..1579cb1 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3844,20 +3844,12 @@ static long hvm_grant_table_op(
 
 static long hvm_memory_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    long rc;
+    long rc = do_memory_op(cmd, arg);
 
-    switch ( cmd & MEMOP_CMD_MASK )
-    {
-    case XENMEM_memory_map:
-    case XENMEM_machine_memory_map:
-    case XENMEM_machphys_mapping:
-        return -ENOSYS;
-    case XENMEM_decrease_reservation:
-        rc = do_memory_op(cmd, arg);
+    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-        return rc;
-    }
-    return do_memory_op(cmd, arg);
+
+    return rc;
 }
 
 static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
@@ -3865,7 +3857,7 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     switch ( cmd )
     {
     default:
-        if ( !is_pvh_vcpu(current) || !is_hardware_domain(current->domain) )
+        if ( !is_hardware_domain(current->domain) )
             return -ENOSYS;
         /* fall through */
     case PHYSDEVOP_map_pirq:
@@ -3877,31 +3869,6 @@ static long hvm_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 }
 
-static long hvm_vcpu_op(
-    int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
-{
-    long rc;
-
-    switch ( cmd )
-    {
-    case VCPUOP_register_runstate_memory_area:
-    case VCPUOP_get_runstate_info:
-    case VCPUOP_set_periodic_timer:
-    case VCPUOP_stop_periodic_timer:
-    case VCPUOP_set_singleshot_timer:
-    case VCPUOP_stop_singleshot_timer:
-    case VCPUOP_register_vcpu_info:
-    case VCPUOP_register_vcpu_time_memory_area:
-        rc = do_vcpu_op(cmd, vcpuid, arg);
-        break;
-    default:
-        rc = -ENOSYS;
-        break;
-    }
-
-    return rc;
-}
-
 typedef unsigned long hvm_hypercall_t(
     unsigned long, unsigned long, unsigned long, unsigned long, unsigned long,
     unsigned long);
@@ -3920,43 +3887,10 @@ static long hvm_grant_table_op_compat32(unsigned int cmd,
 
 static long hvm_memory_op_compat32(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
-    int rc;
+    int rc = compat_memory_op(cmd, arg);
 
-    switch ( cmd & MEMOP_CMD_MASK )
-    {
-    case XENMEM_memory_map:
-    case XENMEM_machine_memory_map:
-    case XENMEM_machphys_mapping:
-        return -ENOSYS;
-    case XENMEM_decrease_reservation:
-        rc = compat_memory_op(cmd, arg);
+    if ( (cmd & MEMOP_CMD_MASK) == XENMEM_decrease_reservation )
         current->domain->arch.hvm_domain.qemu_mapcache_invalidate = 1;
-        return rc;
-    }
-    return compat_memory_op(cmd, arg);
-}
-
-static long hvm_vcpu_op_compat32(
-    int cmd, int vcpuid, XEN_GUEST_HANDLE_PARAM(void) arg)
-{
-    long rc;
-
-    switch ( cmd )
-    {
-    case VCPUOP_register_runstate_memory_area:
-    case VCPUOP_get_runstate_info:
-    case VCPUOP_set_periodic_timer:
-    case VCPUOP_stop_periodic_timer:
-    case VCPUOP_set_singleshot_timer:
-    case VCPUOP_stop_singleshot_timer:
-    case VCPUOP_register_vcpu_info:
-    case VCPUOP_register_vcpu_time_memory_area:
-        rc = compat_vcpu_op(cmd, vcpuid, arg);
-        break;
-    default:
-        rc = -ENOSYS;
-        break;
-    }
 
     return rc;
 }
@@ -3966,27 +3900,29 @@ static long hvm_physdev_op_compat32(
 {
     switch ( cmd )
     {
+    default:
+        if ( !is_hardware_domain(current->domain) )
+            return -ENOSYS;
+        /* fall through */
     case PHYSDEVOP_map_pirq:
     case PHYSDEVOP_unmap_pirq:
     case PHYSDEVOP_eoi:
     case PHYSDEVOP_irq_status_query:
     case PHYSDEVOP_get_free_pirq:
         return compat_physdev_op(cmd, arg);
-        break;
-    default:
-        return -ENOSYS;
-        break;
     }
 }
 
 static hvm_hypercall_t *const hvm_hypercall64_table[NR_hypercalls] = {
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
-    [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op,
     [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
+    HYPERCALL(platform_op),
     HYPERCALL(xen_version),
     HYPERCALL(console_io),
     HYPERCALL(event_channel_op),
+    HYPERCALL(vcpu_op),
+    HYPERCALL(mmuext_op),
     HYPERCALL(sched_op),
     HYPERCALL(set_timer_op),
     HYPERCALL(xsm_op),
@@ -4002,11 +3938,13 @@ static hvm_hypercall_t *const hvm_hypercall64_table[NR_hypercalls] = {
 static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
     [ __HYPERVISOR_memory_op ] = (hvm_hypercall_t *)hvm_memory_op_compat32,
     [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op_compat32,
-    [ __HYPERVISOR_vcpu_op ] = (hvm_hypercall_t *)hvm_vcpu_op_compat32,
     [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op_compat32,
+    COMPAT_CALL(platform_op),
     COMPAT_CALL(xen_version),
     HYPERCALL(console_io),
     HYPERCALL(event_channel_op),
+    COMPAT_CALL(vcpu_op),
+    COMPAT_CALL(mmuext_op),
     COMPAT_CALL(sched_op),
     COMPAT_CALL(set_timer_op),
     HYPERCALL(xsm_op),
@@ -4016,24 +3954,6 @@ static hvm_hypercall_t *const hvm_hypercall32_table[NR_hypercalls] = {
     HYPERCALL(tmem_op)
 };
 
-/* PVH 32bitfixme. */
-static hvm_hypercall_t *const pvh_hypercall64_table[NR_hypercalls] = {
-    HYPERCALL(platform_op),
-    HYPERCALL(memory_op),
-    HYPERCALL(xen_version),
-    HYPERCALL(console_io),
-    [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
-    HYPERCALL(vcpu_op),
-    HYPERCALL(mmuext_op),
-    HYPERCALL(xsm_op),
-    HYPERCALL(sched_op),
-    HYPERCALL(event_channel_op),
-    [ __HYPERVISOR_physdev_op ] = (hvm_hypercall_t *)hvm_physdev_op,
-    HYPERCALL(hvm_op),
-    HYPERCALL(sysctl),
-    HYPERCALL(domctl)
-};
-
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
     struct vcpu *curr = current;
@@ -4060,9 +3980,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
     if ( (eax & 0x80000000) && is_viridian_domain(curr->domain) )
         return viridian_hypercall(regs);
 
-    if ( (eax >= NR_hypercalls) ||
-         (is_pvh_vcpu(curr) ? !pvh_hypercall64_table[eax]
-                            : !hvm_hypercall32_table[eax]) )
+    if ( (eax >= NR_hypercalls) || !hvm_hypercall32_table[eax] )
     {
         regs->eax = -ENOSYS;
         return HVM_HCALL_completed;
@@ -4077,14 +3995,9 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
                     regs->r10, regs->r8, regs->r9);
 
         curr->arch.hvm_vcpu.hcall_64bit = 1;
-        if ( is_pvh_vcpu(curr) )
-            regs->rax = pvh_hypercall64_table[eax](regs->rdi, regs->rsi,
-                                                   regs->rdx, regs->r10,
-                                                   regs->r8, regs->r9);
-        else
-            regs->rax = hvm_hypercall64_table[eax](regs->rdi, regs->rsi,
-                                                   regs->rdx, regs->r10,
-                                                   regs->r8, regs->r9);
+        regs->rax = hvm_hypercall64_table[eax](regs->rdi, regs->rsi,
+                                               regs->rdx, regs->r10,
+                                               regs->r8, regs->r9);
         curr->arch.hvm_vcpu.hcall_64bit = 0;
     }
     else
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 1a8a5e0..a263d26 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3253,6 +3253,12 @@ long do_mmuext_op(
         case MMUEXT_CLEAR_PAGE: {
             struct page_info *page;
 
+            if ( !is_pv_vcpu(current) )
+            {
+                okay = 0;
+                break;
+            }
+
             page = get_page_from_gfn(d, op.arg1.mfn, NULL, P2M_ALLOC);
             if ( !page || !get_page_type(page, PGT_writable_page) )
             {
@@ -3276,6 +3282,12 @@
         {
             struct page_info *src_page, *dst_page;
 
+            if ( !is_pv_vcpu(current) )
+            {
+                okay = 0;
+                break;
+            }
+
             src_page = get_page_from_gfn(d, op.arg2.src_mfn, NULL, P2M_ALLOC);
             if ( unlikely(!src_page) )
             {
@@ -3310,6 +3322,12 @@
             unsigned long mfn;
             struct spage_info *spage;
 
+            if ( paging_mode_refcounts(pg_owner) )
+            {
+                okay = 0;
+                break;
+            }
+
             mfn = op.arg1.mfn;
             if ( mfn & (L1_PAGETABLE_ENTRIES-1) )
             {
@@ -3336,6 +3354,12 @@
             unsigned long mfn;
             struct spage_info *spage;
 
+            if ( paging_mode_refcounts(pg_owner) )
+            {
+                okay = 0;
+                break;
+            }
+
             mfn = op.arg1.mfn;
             if ( mfn & (L1_PAGETABLE_ENTRIES-1) )
             {
diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c
index f178315..a2d2b96 100644
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -520,7 +520,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct physdev_set_iopl set_iopl;
 
         ret = -ENOSYS;
-        if ( is_pvh_vcpu(current) )
+        if ( !is_pv_vcpu(current) )
             break;
 
         ret = -EFAULT;
@@ -538,7 +538,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
         struct physdev_set_iobitmap set_iobitmap;
 
         ret = -ENOSYS;
-        if ( is_pvh_vcpu(current) )
+        if ( !is_pv_vcpu(current) )
             break;
 
         ret = -EFAULT;
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 69c6195..3fa90aa 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -236,8 +236,6 @@ int compat_update_va_mapping_otherdomain(unsigned long va, u32 lo, u32 hi,
     return do_update_va_mapping_otherdomain(va, lo | ((u64)hi << 32), flags, domid);
 }
 
-DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);
-
 int compat_mmuext_op(XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) cmp_uops,
                      unsigned int count,
                      XEN_GUEST_HANDLE_PARAM(uint) pdone,
diff --git a/xen/include/asm-x86/hypercall.h b/xen/include/asm-x86/hypercall.h
index afa8ba9..cee8817 100644
--- a/xen/include/asm-x86/hypercall.h
+++ b/xen/include/asm-x86/hypercall.h
@@ -8,6 +8,7 @@
 #include <public/physdev.h>
 #include <public/arch-x86/xen-mca.h> /* for do_mca */
 #include <xen/types.h>
+#include <compat/memory.h>
 
 /*
  * Both do_mmuext_op() and do_mmu_update():
@@ -110,4 +111,13 @@ extern int
 arch_compat_vcpu_op(
     int cmd, struct vcpu *v, XEN_GUEST_HANDLE_PARAM(void) arg);
 
+DEFINE_XEN_GUEST_HANDLE(mmuext_op_compat_t);
+
+extern int
+compat_mmuext_op(
+    XEN_GUEST_HANDLE_PARAM(mmuext_op_compat_t) cmp_uops,
+    unsigned int count,
+    XEN_GUEST_HANDLE_PARAM(uint) pdone,
+    unsigned int foreigndom);
+
 #endif /* __ASM_X86_HYPERCALL_H__ */
-- 
1.8.5.2
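For illustration only (not part of the patch): with the unified table, a
64-bit HVM guest reaches the newly exposed ops through the normal
hypercall-page convention, arguments in rdi/rsi/rdx/r10/r8/r9 as consumed
by hvm_do_hypercall() above.  A minimal guest-side sketch, assuming the
guest has already installed its hypercall page via the usual Xen CPUID/MSR
handshake; the constants are quoted from the public headers:

#include <stdint.h>

#define __HYPERVISOR_vcpu_op  24   /* xen/include/public/xen.h  */
#define VCPUOP_is_up           3   /* xen/include/public/vcpu.h */

extern char hypercall_page[];      /* assumed: installed by the guest earlier */

static inline long hypercall3(unsigned int op, unsigned long a1,
                              unsigned long a2, unsigned long a3)
{
    long ret;
    register unsigned long _a1 asm("rdi") = a1;   /* argument registers match */
    register unsigned long _a2 asm("rsi") = a2;   /* the rdi/rsi/rdx/r10/r8/r9 */
    register unsigned long _a3 asm("rdx") = a3;   /* use in hvm_do_hypercall() */

    asm volatile ( "call *%[entry]"
                   : "=a" (ret), "+r" (_a1), "+r" (_a2), "+r" (_a3)
                   : [entry] "r" (hypercall_page + op * 32)
                   : "memory" );
    return ret;
}

/* Returns 1 if vCPU 1 is up, 0 if it is down. */
long vcpu1_is_up(void)
{
    return hypercall3(__HYPERVISOR_vcpu_op, VCPUOP_is_up, 1, 0);
}

Before this patch the same call would have taken the -ENOSYS path in the
(now removed) hvm_vcpu_op() wrapper, since VCPUOP_is_up was not on its
whitelist.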