[Xen-changelog] [xen staging] viridian: introduce struct viridian_page
commit b8f0767b438bb00b3bddb58dd0bb6001cff29d23
Author:     Paul Durrant <paul.durrant@xxxxxxxxxx>
AuthorDate: Fri Nov 9 11:40:12 2018 +0100
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Fri Nov 9 11:40:12 2018 +0100

    viridian: introduce struct viridian_page

    The 'vp_assist' page is currently an example of a guest page which
    needs to be kept mapped throughout the life-time of a guest, but there
    are other such examples in the specification [1].

    This patch therefore introduces a generic 'viridian_page' type and
    converts the current vp_assist/apic_assist related code to use it.
    Subsequent patches implementing other enlightenments can then also
    make use of it.

    This patch also renames the 'vp_assist_pending' field in struct
    hvm_viridian_vcpu_context to 'apic_assist_pending' to more accurately
    reflect its meaning. The term 'vp_assist' applies to the whole page
    rather than just the EOI-avoidance enlightenment. New versions of the
    specification have defined data structures for other enlightenments
    within the same page.

    No functional change.

    [1] https://github.com/MicrosoftDocs/Virtualization-Documentation/raw/live/tlfs/Hypervisor%20Top%20Level%20Functional%20Specification%20v5.0C.pdf

    Signed-off-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
    Reviewed-by: Roger Pau Monne <roger.pau@xxxxxxxxxx>
    Reviewed-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
 tools/misc/xen-hvmctx.c                |  4 +-
 xen/arch/x86/hvm/viridian/private.h    |  5 ++
 xen/arch/x86/hvm/viridian/synic.c      | 90 ++++++----------------------------
 xen/arch/x86/hvm/viridian/viridian.c   | 57 +++++++++++++++++++++
 xen/include/asm-x86/hvm/viridian.h     | 13 ++---
 xen/include/public/arch-x86/hvm/save.h |  2 +-
 6 files changed, 87 insertions(+), 84 deletions(-)

diff --git a/tools/misc/xen-hvmctx.c b/tools/misc/xen-hvmctx.c
index 823aa7d736..4f336a6cea 100644
--- a/tools/misc/xen-hvmctx.c
+++ b/tools/misc/xen-hvmctx.c
@@ -370,9 +370,9 @@ static void dump_viridian_vcpu(void)
 {
     HVM_SAVE_TYPE(VIRIDIAN_VCPU) p;
     READ(p);
-    printf(" VIRIDIAN_VCPU: vp_assist_msr 0x%llx, vp_assist_pending %s\n",
+    printf(" VIRIDIAN_VCPU: vp_assist_msr 0x%llx, apic_assist_pending %s\n",
           (unsigned long long) p.vp_assist_msr,
-           p.vp_assist_pending ? "true" : "false");
+           p.apic_assist_pending ? "true" : "false");
 }
 
 static void dump_vmce_vcpu(void)
diff --git a/xen/arch/x86/hvm/viridian/private.h b/xen/arch/x86/hvm/viridian/private.h
index a5e06f9866..398b22f12d 100644
--- a/xen/arch/x86/hvm/viridian/private.h
+++ b/xen/arch/x86/hvm/viridian/private.h
@@ -87,6 +87,11 @@ void viridian_time_save_domain_ctxt(
 void viridian_time_load_domain_ctxt(
     struct domain *d, const struct hvm_viridian_domain_context *ctxt);
 
+void viridian_dump_guest_page(const struct vcpu *v, const char *name,
+                              const struct viridian_page *vp);
+void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp);
+void viridian_unmap_guest_page(struct viridian_page *vp);
+
 #endif /* X86_HVM_VIRIDIAN_PRIVATE_H */
 
 /*
diff --git a/xen/arch/x86/hvm/viridian/synic.c b/xen/arch/x86/hvm/viridian/synic.c
index d8d6f6e1c9..845029b568 100644
--- a/xen/arch/x86/hvm/viridian/synic.c
+++ b/xen/arch/x86/hvm/viridian/synic.c
@@ -22,73 +22,11 @@ typedef struct _HV_VIRTUAL_APIC_ASSIST
     uint32_t reserved_zero:31;
 } HV_VIRTUAL_APIC_ASSIST;
 
-union _HV_VP_ASSIST_PAGE
+typedef union _HV_VP_ASSIST_PAGE
 {
     HV_VIRTUAL_APIC_ASSIST ApicAssist;
     uint8_t ReservedZBytePadding[PAGE_SIZE];
-};
-
-static void dump_vp_assist(const struct vcpu *v)
-{
-    const union viridian_page_msr *va = &v->arch.hvm.viridian.vp_assist.msr;
-
-    if ( !va->fields.enabled )
-        return;
-
-    printk(XENLOG_G_INFO "%pv: VIRIDIAN VP_ASSIST_PAGE: pfn: %lx\n",
-           v, (unsigned long)va->fields.pfn);
-}
-
-static void initialize_vp_assist(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-    unsigned long gmfn = v->arch.hvm.viridian.vp_assist.msr.fields.pfn;
-    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
-    HV_VP_ASSIST_PAGE *ptr;
-
-    ASSERT(!v->arch.hvm.viridian.vp_assist.ptr);
-
-    if ( !page )
-        goto fail;
-
-    if ( !get_page_type(page, PGT_writable_page) )
-    {
-        put_page(page);
-        goto fail;
-    }
-
-    ptr = __map_domain_page_global(page);
-    if ( !ptr )
-    {
-        put_page_and_type(page);
-        goto fail;
-    }
-
-    clear_page(ptr);
-
-    v->arch.hvm.viridian.vp_assist.ptr = ptr;
-    return;
-
- fail:
-    gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
-             gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
-}
-
-static void teardown_vp_assist(struct vcpu *v)
-{
-    HV_VP_ASSIST_PAGE *ptr = v->arch.hvm.viridian.vp_assist.ptr;
-    struct page_info *page;
-
-    if ( !ptr )
-        return;
-
-    v->arch.hvm.viridian.vp_assist.ptr = NULL;
-
-    page = mfn_to_page(domain_page_map_to_mfn(ptr));
-
-    unmap_domain_page_global(ptr);
-    put_page_and_type(page);
-}
+} HV_VP_ASSIST_PAGE;
 
 void viridian_apic_assist_set(struct vcpu *v)
 {
@@ -102,10 +40,10 @@ void viridian_apic_assist_set(struct vcpu *v)
      * wrong and the VM will most likely hang so force a crash now
      * to make the problem clear.
      */
-    if ( v->arch.hvm.viridian.vp_assist.pending )
+    if ( v->arch.hvm.viridian.apic_assist_pending )
         domain_crash(v->domain);
 
-    v->arch.hvm.viridian.vp_assist.pending = true;
+    v->arch.hvm.viridian.apic_assist_pending = true;
     ptr->ApicAssist.no_eoi = 1;
 }
 
@@ -116,11 +54,11 @@ bool viridian_apic_assist_completed(struct vcpu *v)
     if ( !ptr )
        return false;
 
-    if ( v->arch.hvm.viridian.vp_assist.pending &&
+    if ( v->arch.hvm.viridian.apic_assist_pending &&
         !ptr->ApicAssist.no_eoi )
    {
        /* An EOI has been avoided */
-        v->arch.hvm.viridian.vp_assist.pending = false;
+        v->arch.hvm.viridian.apic_assist_pending = false;
        return true;
    }
 
@@ -135,7 +73,7 @@ void viridian_apic_assist_clear(struct vcpu *v)
         return;
 
     ptr->ApicAssist.no_eoi = 0;
-    v->arch.hvm.viridian.vp_assist.pending = false;
+    v->arch.hvm.viridian.apic_assist_pending = false;
 }
 
 int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
@@ -161,11 +99,13 @@ int viridian_synic_wrmsr(struct vcpu *v, uint32_t idx, uint64_t val)
         break;
 
     case HV_X64_MSR_VP_ASSIST_PAGE:
-        teardown_vp_assist(v); /* release any previous mapping */
+        /* release any previous mapping */
+        viridian_unmap_guest_page(&v->arch.hvm.viridian.vp_assist);
         v->arch.hvm.viridian.vp_assist.msr.raw = val;
-        dump_vp_assist(v);
+        viridian_dump_guest_page(v, "VP_ASSIST",
+                                 &v->arch.hvm.viridian.vp_assist);
         if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
-            initialize_vp_assist(v);
+            viridian_map_guest_page(v, &v->arch.hvm.viridian.vp_assist);
         break;
 
     default:
@@ -211,7 +151,7 @@ int viridian_synic_rdmsr(const struct vcpu *v, uint32_t idx, uint64_t *val)
 void viridian_synic_save_vcpu_ctxt(const struct vcpu *v,
                                    struct hvm_viridian_vcpu_context *ctxt)
 {
-    ctxt->vp_assist_pending = v->arch.hvm.viridian.vp_assist.pending;
+    ctxt->apic_assist_pending = v->arch.hvm.viridian.apic_assist_pending;
     ctxt->vp_assist_msr = v->arch.hvm.viridian.vp_assist.msr.raw;
 }
 
@@ -220,9 +160,9 @@ void viridian_synic_load_vcpu_ctxt(
 {
     v->arch.hvm.viridian.vp_assist.msr.raw = ctxt->vp_assist_msr;
     if ( v->arch.hvm.viridian.vp_assist.msr.fields.enabled )
-        initialize_vp_assist(v);
+        viridian_map_guest_page(v, &v->arch.hvm.viridian.vp_assist);
 
-    v->arch.hvm.viridian.vp_assist.pending = !!ctxt->vp_assist_pending;
+    v->arch.hvm.viridian.apic_assist_pending = ctxt->apic_assist_pending;
 }
 
 /*
diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c
index 8630bc7bb6..7d73f41de6 100644
--- a/xen/arch/x86/hvm/viridian/viridian.c
+++ b/xen/arch/x86/hvm/viridian/viridian.c
@@ -588,6 +588,63 @@ out:
     return HVM_HCALL_completed;
 }
 
+void viridian_dump_guest_page(const struct vcpu *v, const char *name,
+                              const struct viridian_page *vp)
+{
+    if ( !vp->msr.fields.enabled )
+        return;
+
+    printk(XENLOG_G_INFO "%pv: VIRIDIAN %s: pfn: %lx\n",
+           v, name, (unsigned long)vp->msr.fields.pfn);
+}
+
+void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp)
+{
+    struct domain *d = v->domain;
+    unsigned long gmfn = vp->msr.fields.pfn;
+    struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
+
+    ASSERT(!vp->ptr);
+
+    if ( !page )
+        goto fail;
+
+    if ( !get_page_type(page, PGT_writable_page) )
+    {
+        put_page(page);
+        goto fail;
+    }
+
+    vp->ptr = __map_domain_page_global(page);
+    if ( !vp->ptr )
+    {
+        put_page_and_type(page);
+        goto fail;
+    }
+
+    clear_page(vp->ptr);
+    return;
+
+ fail:
+    gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n",
+             gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
+}
+
+void viridian_unmap_guest_page(struct viridian_page *vp)
+{
+    struct page_info *page;
+
+    if ( !vp->ptr )
+        return;
+
+    page = mfn_to_page(domain_page_map_to_mfn(vp->ptr));
+
+    unmap_domain_page_global(vp->ptr);
+    vp->ptr = NULL;
+
+    put_page_and_type(page);
+}
+
 static int viridian_save_domain_ctxt(struct vcpu *v,
                                      hvm_domain_context_t *h)
 {
diff --git a/xen/include/asm-x86/hvm/viridian.h b/xen/include/asm-x86/hvm/viridian.h
index 22f14a526e..ec5ef8d3f9 100644
--- a/xen/include/asm-x86/hvm/viridian.h
+++ b/xen/include/asm-x86/hvm/viridian.h
@@ -20,15 +20,16 @@ union viridian_page_msr
     } fields;
 };
 
-typedef union _HV_VP_ASSIST_PAGE HV_VP_ASSIST_PAGE;
+struct viridian_page
+{
+    union viridian_page_msr msr;
+    void *ptr;
+};
 
 struct viridian_vcpu
 {
-    struct {
-        union viridian_page_msr msr;
-        HV_VP_ASSIST_PAGE *ptr;
-        bool pending;
-    } vp_assist;
+    struct viridian_page vp_assist;
+    bool apic_assist_pending;
     uint64_t crash_param[5];
 };
 
diff --git a/xen/include/public/arch-x86/hvm/save.h b/xen/include/public/arch-x86/hvm/save.h
index 4691d4d4aa..80e762c335 100644
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -600,7 +600,7 @@ DECLARE_HVM_SAVE_TYPE(VIRIDIAN_DOMAIN, 15, struct hvm_viridian_domain_context);
 
 struct hvm_viridian_vcpu_context {
     uint64_t vp_assist_msr;
-    uint8_t vp_assist_pending;
+    uint8_t apic_assist_pending;
     uint8_t _pad[7];
 };
 
--
generated by git-patchbot for /home/xen/git/xen.git#staging

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/xen-changelog
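
[Editor's illustration] As a sketch of the reuse the commit message anticipates, a later enlightenment that keeps its own guest page mapped could follow the same pattern as the HV_X64_MSR_VP_ASSIST_PAGE handler above. Only struct viridian_page, viridian_unmap_guest_page(), viridian_dump_guest_page() and viridian_map_guest_page() come from this patch; the 'example_page' field and the wrapper function below are hypothetical placeholders added purely for illustration.

/*
 * Sketch only: 'example_page' stands in for a hypothetical
 * struct viridian_page member of struct viridian_vcpu that a
 * later enlightenment might add.
 */
static void example_page_wrmsr(struct vcpu *v, uint64_t val)
{
    struct viridian_page *ep = &v->arch.hvm.viridian.example_page;

    /* Release any previous mapping before the MSR value changes. */
    viridian_unmap_guest_page(ep);

    ep->msr.raw = val;
    viridian_dump_guest_page(v, "EXAMPLE", ep);

    /* Map the page only if the guest has set the enable bit. */
    if ( ep->msr.fields.enabled )
        viridian_map_guest_page(v, ep);
}

The unmap-before-update ordering mirrors the vp_assist case: the old mapping is dropped before the MSR is rewritten, so a guest that relocates the page never leaves a stale mapping behind.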