[Xen-devel] [PATCH] x86: move vgc_flags to struct pv_vcpu
There has been effectively no use of the field for HVM. Also shrink the
field to unsigned int, even if this doesn't immediately yield any space
benefit for the structure itself; the resulting 32-bit padding slot can
eventually be used for some other field. The change in size does make
accesses slightly more efficient, though, as no REX.W prefix is needed
anymore on the respective insns.

Mirror the HVM-side change made here (no longer setting the field to
VGCF_online) to Arm as well, on the assumption that the Arm code was
originally cloned from x86. VGCF_online really should simply and
consistently be the guest view of the inverse of VPF_down, and hence
needs representing only in the get/set vCPU context interfaces.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
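---
As an aside for reviewers, a minimal standalone sketch of the model the
last paragraph argues for, with VGCF_online never stored anywhere but
derived at the get-context boundary from VPF_down. The toy structure,
helper name, and bit positions below are illustrative only, not the
real Xen definitions:

    /* Assumed bit positions, for illustration only. */
    #define VPF_down    (1UL << 0)  /* vCPU is offline */
    #define VGCF_online (1U  << 5)  /* guest-context view of "not down" */

    struct toy_vcpu {
        unsigned long pause_flags;
    };

    /* The guest-visible online flag is computed on demand, never cached. */
    static unsigned int toy_vgc_online(const struct toy_vcpu *v)
    {
        return (v->pause_flags & VPF_down) ? 0 : VGCF_online;
    }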
--- a/xen/arch/arm/vpsci.c
+++ b/xen/arch/arm/vpsci.c
@@ -78,7 +78,6 @@ static int do_common_cpu_on(register_t t
         ctxt->user_regs.x0 = context_id;
     }
 #endif
-    ctxt->flags = VGCF_online;
 
     domain_lock(d);
     rc = arch_set_info_guest(v, ctxt);

--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -896,6 +896,8 @@ int arch_set_info_guest(
         if ( ((c(ldt_base) & (PAGE_SIZE - 1)) != 0) ||
              (c(ldt_ents) > 8192) )
             return -EINVAL;
+
+        v->arch.pv.vgc_flags = flags;
     }
 
     v->arch.flags |= TF_kernel_mode;
@@ -908,8 +910,6 @@ int arch_set_info_guest(
          !is_hvm_domain(d) && !is_pv_32bit_domain(d) )
         v->arch.flags &= ~TF_kernel_mode;
 
-    v->arch.vgc_flags = flags;
-
     vcpu_setup_fpu(v, v->arch.xsave_area,
                    flags & VGCF_I387_VALID ? &c.nat->fpu_ctxt : NULL,
                    FCW_DEFAULT);
@@ -1488,7 +1488,7 @@ static void load_segments(struct vcpu *n
             domain_crash(n->domain);
         }
 
-        if ( n->arch.vgc_flags & VGCF_failsafe_disables_events )
+        if ( n->arch.pv.vgc_flags & VGCF_failsafe_disables_events )
             vcpu_info(n, evtchn_upcall_mask) = 1;
 
         regs->entry_vector |= TRAP_syscall;
@@ -1527,7 +1527,7 @@ static void load_segments(struct vcpu *n
             domain_crash(n->domain);
         }
 
-        if ( n->arch.vgc_flags & VGCF_failsafe_disables_events )
+        if ( n->arch.pv.vgc_flags & VGCF_failsafe_disables_events )
             vcpu_info(n, evtchn_upcall_mask) = 1;
 
         regs->entry_vector |= TRAP_syscall;

--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1500,7 +1500,10 @@ void arch_get_info_guest(struct vcpu *v,
 #define c(fld) (!compat ? (c.nat->fld) : (c.cmp->fld))
 
     memcpy(&c.nat->fpu_ctxt, v->arch.fpu_ctxt, sizeof(c.nat->fpu_ctxt));
-    c(flags = v->arch.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
+    if ( is_pv_domain(d) )
+        c(flags = v->arch.pv.vgc_flags & ~(VGCF_i387_valid|VGCF_in_kernel));
+    else
+        c(flags = 0);
     if ( v->fpu_initialised )
         c(flags |= VGCF_i387_valid);
     if ( !(v->pause_flags & VPF_down) )

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -1151,8 +1151,6 @@ static int hvm_load_cpu_ctxt(struct doma
     v->arch.dr6 = ctxt.dr6;
     v->arch.dr7 = ctxt.dr7;
 
-    v->arch.vgc_flags = VGCF_online;
-
     /* Auxiliary processors should be woken immediately. */
     v->is_initialised = 1;
     clear_bit(_VPF_down, &v->pause_flags);
@@ -3864,8 +3862,6 @@ void hvm_vcpu_reset_state(struct vcpu *v
         v->arch.xsave_area->xsave_hdr.xstate_bv = 0;
     vcpu_setup_fpu(v, v->arch.xsave_area, NULL, FCW_RESET);
 
-    v->arch.vgc_flags = VGCF_online;
-
     arch_vcpu_regs_init(v);
 
     v->arch.user_regs.rip = ip;

--- a/xen/arch/x86/pv/callback.c
+++ b/xen/arch/x86/pv/callback.c
@@ -82,17 +82,17 @@ static long register_guest_callback(stru
     case CALLBACKTYPE_failsafe:
         curr->arch.pv.failsafe_callback_eip = reg->address;
         if ( reg->flags & CALLBACKF_mask_events )
-            curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
+            curr->arch.pv.vgc_flags |= VGCF_failsafe_disables_events;
         else
-            curr->arch.vgc_flags &= ~VGCF_failsafe_disables_events;
+            curr->arch.pv.vgc_flags &= ~VGCF_failsafe_disables_events;
         break;
 
     case CALLBACKTYPE_syscall:
         curr->arch.pv.syscall_callback_eip = reg->address;
         if ( reg->flags & CALLBACKF_mask_events )
-            curr->arch.vgc_flags |= VGCF_syscall_disables_events;
+            curr->arch.pv.vgc_flags |= VGCF_syscall_disables_events;
         else
-            curr->arch.vgc_flags &= ~VGCF_syscall_disables_events;
+            curr->arch.pv.vgc_flags &= ~VGCF_syscall_disables_events;
         break;
 
     case CALLBACKTYPE_syscall32:
@@ -226,9 +226,9 @@ static long compat_register_guest_callba
         curr->arch.pv.failsafe_callback_cs = reg->address.cs;
         curr->arch.pv.failsafe_callback_eip = reg->address.eip;
         if ( reg->flags & CALLBACKF_mask_events )
-            curr->arch.vgc_flags |= VGCF_failsafe_disables_events;
+            curr->arch.pv.vgc_flags |= VGCF_failsafe_disables_events;
         else
-            curr->arch.vgc_flags &= ~VGCF_failsafe_disables_events;
+            curr->arch.pv.vgc_flags &= ~VGCF_failsafe_disables_events;
         break;
 
     case CALLBACKTYPE_syscall32:

--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -69,7 +69,7 @@ void __dummy__(void)
     OFFSET(VCPU_kernel_sp, struct vcpu, arch.pv.kernel_sp);
     OFFSET(VCPU_kernel_ss, struct vcpu, arch.pv.kernel_ss);
     OFFSET(VCPU_iopl, struct vcpu, arch.pv.iopl);
-    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.vgc_flags);
+    OFFSET(VCPU_guest_context_flags, struct vcpu, arch.pv.vgc_flags);
     OFFSET(VCPU_cr3, struct vcpu, arch.cr3);
     OFFSET(VCPU_arch_msrs, struct vcpu, arch.msrs);
     OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);

--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -476,6 +476,8 @@ struct pv_vcpu
     /* map_domain_page() mapping cache. */
     struct mapcache_vcpu mapcache;
 
+    unsigned int vgc_flags;
+
     struct trap_info *trap_ctxt;
 
     unsigned long gdt_frames[FIRST_RESERVED_GDT_PAGE];
@@ -542,7 +544,6 @@ struct arch_vcpu
      */
     void *fpu_ctxt;
 
-    unsigned long vgc_flags;
     struct cpu_user_regs user_regs;
 
     /* Debug registers. */
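---
One more illustrative note, not part of the patch, on the REX.W remark
in the description. The structures and functions below are made up; the
encodings shown are what a typical x86-64 compiler emits for a load of
the first member:

    struct with_long { unsigned long f; };
    struct with_int  { unsigned int  f; };

    /* mov (%rdi),%rax -> 48 8b 07 (REX.W prefix required for 64-bit) */
    unsigned long read_long(const struct with_long *p) { return p->f; }

    /* mov (%rdi),%eax -> 8b 07 (one byte shorter, no REX prefix) */
    unsigned int read_int(const struct with_int *p) { return p->f; }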