[Xen-devel] [PATCH 3/8] x86/SVM: Add AVIC vmexit handlers
From: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>

AVIC introduces two new #vmexit handlers:

VMEXIT_INCOMP_IPI:
This occurs when an IPI could not be delivered to all targeted guest
virtual processors because at least one guest virtual processor was
not allocated to a physical core at the time. In this case, Xen tries
to emulate the IPI.

VMEXIT_DO_NOACCEL:
This occurs when a guest accesses an APIC register that cannot be
accelerated by AVIC. In this case, Xen tries to emulate the register
access. This fault is also generated if an EOI is attempted when the
highest-priority in-service interrupt is level-triggered.

This patch also declares vlapic_read_aligned() and vlapic_reg_write()
as non-static so that they can be used by the AVIC code.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@xxxxxxx>
Signed-off-by: Janakarajan Natarajan <Janakarajan.Natarajan@xxxxxxx>
---
 xen/arch/x86/hvm/svm/avic.c        | 296 +++++++++++++++++++++++++++++++++++++
 xen/arch/x86/hvm/svm/svm.c         |   8 +
 xen/arch/x86/hvm/vlapic.c          |   4 +-
 xen/include/asm-x86/hvm/svm/avic.h |   3 +
 xen/include/asm-x86/hvm/svm/vmcb.h |   6 +
 xen/include/asm-x86/hvm/vlapic.h   |   4 +
 6 files changed, 319 insertions(+), 2 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/avic.c b/xen/arch/x86/hvm/svm/avic.c
index 8108698911..e112469774 100644
--- a/xen/arch/x86/hvm/svm/avic.c
+++ b/xen/arch/x86/hvm/svm/avic.c
@@ -19,6 +19,7 @@
 #include <xen/sched.h>
 #include <xen/stdbool.h>
 #include <asm/acpi.h>
+#include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/atomic.h>
 #include <asm/event.h>
@@ -37,6 +38,8 @@
 
 #define AVIC_VAPIC_BAR_MASK      (((1ULL << 40) - 1) << PAGE_SHIFT)
 
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK    0xFF0
+
 /*
  * Note:
  * Currently, svm-avic mode is not supported with nested virtualization.
@@ -101,6 +104,8 @@ int svm_avic_dom_init(struct domain *d)
     d->arch.hvm_domain.svm.avic_physical_id_table_pg = pg;
     d->arch.hvm_domain.svm.avic_physical_id_table = __map_domain_page_global(pg);
 
+    spin_lock_init(&d->arch.hvm_domain.svm.avic_dfr_mode_lock);
+
     return ret;
  err_out:
     svm_avic_dom_destroy(d);
@@ -181,6 +186,297 @@ int svm_avic_init_vmcb(struct vcpu *v)
 }
 
 /*
+ * Note:
+ * This function handles the AVIC_INCOMP_IPI #vmexit when AVIC is enabled.
+ * The hardware generates this fault when an IPI could not be delivered
+ * to all targeted guest virtual processors because at least one guest
+ * virtual processor was not allocated to a physical core at the time.
+ */
+void svm_avic_vmexit_do_incomp_ipi(struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
+    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+    u32 icrh = vmcb->exitinfo1 >> 32;
+    u32 icrl = vmcb->exitinfo1;
+    u32 id = vmcb->exitinfo2 >> 32;
+    u32 index = vmcb->exitinfo2 & 0xFF;
+
+    switch ( id )
+    {
+    case AVIC_INCMP_IPI_ERR_INVALID_INT_TYPE:
+        /*
+         * AVIC hardware handles the delivery of
+         * IPIs when the specified Message Type is Fixed
+         * (also known as fixed delivery mode) and
+         * the Trigger Mode is edge-triggered. The hardware
+         * also supports self and broadcast delivery modes
+         * specified via the Destination Shorthand (DSH)
+         * field of the ICRL. Logical and physical APIC ID
+         * formats are supported. All other IPI types cause
+         * a #VMEXIT, which needs to be emulated.
+         */
+        vlapic_reg_write(curr, APIC_ICR2, icrh);
+        vlapic_reg_write(curr, APIC_ICR, icrl);
+        break;
+
+    case AVIC_INCMP_IPI_ERR_TARGET_NOT_RUN:
+    {
+        /*
+         * At this point, we expect that the AVIC HW has already
+         * set the appropriate IRR bits on the valid target
+         * vcpus. So, we just need to kick the appropriate vcpu.
+         */
+        struct vcpu *v;
+        uint32_t dest = GET_xAPIC_DEST_FIELD(icrh);
+        uint32_t short_hand = icrl & APIC_SHORT_MASK;
+        bool dest_mode = !!(icrl & APIC_DEST_MASK);
+
+        for_each_vcpu ( currd, v )
+        {
+            if ( v != curr &&
+                 vlapic_match_dest(vcpu_vlapic(v), vcpu_vlapic(curr),
+                                   short_hand, dest, dest_mode) )
+            {
+                vcpu_kick(v);
+                break;
+            }
+        }
+        break;
+    }
+
+    case AVIC_INCMP_IPI_ERR_INV_TARGET:
+        gprintk(XENLOG_ERR,
+                "SVM: %s: Invalid IPI target (icr=%#08x:%08x, idx=%u)\n",
+                __func__, icrh, icrl, index);
+        domain_crash(currd);
+        break;
+
+    case AVIC_INCMP_IPI_ERR_INV_BK_PAGE:
+        gprintk(XENLOG_ERR,
+                "SVM: %s: Invalid bk page (icr=%#08x:%08x, idx=%u)\n",
+                __func__, icrh, icrl, index);
+        domain_crash(currd);
+        break;
+
+    default:
+        gprintk(XENLOG_ERR, "SVM: %s: Unknown IPI interception (%#x)\n",
+                __func__, id);
+        domain_crash(currd);
+    }
+}
+
+static struct avic_logical_id_entry *
+avic_get_logical_id_entry(struct svm_domain *d, u32 ldr, bool flat)
+{
+    unsigned int index;
+    unsigned int dest_id = GET_xAPIC_LOGICAL_ID(ldr);
+
+    if ( !dest_id )
+        return NULL;
+
+    if ( flat )
+    {
+        index = ffs(dest_id) - 1;
+        if ( index > 7 )
+            return NULL;
+    }
+    else
+    {
+        unsigned int cluster = (dest_id & 0xf0) >> 4;
+        int apic = ffs(dest_id & 0x0f) - 1;
+
+        if ( (apic < 0) || (apic > 7) || (cluster >= 0xf) )
+            return NULL;
+        index = (cluster << 2) + apic;
+    }
+
+    ASSERT(index <= 255);
+
+    return &d->avic_logical_id_table[index];
+}
+
+static int avic_ldr_write(struct vcpu *v, u8 g_phy_id, u32 ldr, bool valid)
+{
+    struct avic_logical_id_entry *entry, new_entry;
+    u32 dfr = vlapic_read_aligned(vcpu_vlapic(v), APIC_DFR);
+
+    entry = avic_get_logical_id_entry(&v->domain->arch.hvm_domain.svm,
+                                      ldr, (dfr == APIC_DFR_FLAT));
+    if ( !entry )
+        return -EINVAL;
+
+    new_entry = *entry;
+    smp_rmb();
+    new_entry.guest_phy_apic_id = g_phy_id;
+    new_entry.valid = valid;
+    *entry = new_entry;
+    smp_wmb();
+
+    return 0;
+}
+
+static int avic_handle_ldr_update(struct vcpu *v)
+{
+    int ret = 0;
+    u32 ldr = vlapic_read_aligned(vcpu_vlapic(v), APIC_LDR);
+    u32 apic_id = vlapic_read_aligned(vcpu_vlapic(v), APIC_ID);
+
+    if ( !ldr )
+        return -EINVAL;
+
+    ret = avic_ldr_write(v, GET_xAPIC_ID(apic_id), ldr, true);
+    if ( ret && v->arch.hvm_svm.avic_last_ldr )
+    {
+        /*
+         * Note:
+         * In case of failure to update the LDR register,
+         * we set the guest physical APIC ID to 0, and
+         * set the logical APIC ID entry to invalid (false).
+         */
+        avic_ldr_write(v, 0, v->arch.hvm_svm.avic_last_ldr, false);
+        v->arch.hvm_svm.avic_last_ldr = 0;
+    }
+    else
+    {
+        /*
+         * Note:
+         * This saves the last valid LDR so that we know
+         * which entry in the logical APIC ID table to
+         * clean up when the LDR is updated.
+         */
+        v->arch.hvm_svm.avic_last_ldr = ldr;
+    }
+
+    return ret;
+}
+
+static int avic_handle_dfr_update(struct vcpu *v)
+{
+    u32 mod;
+    struct svm_domain *d = &v->domain->arch.hvm_domain.svm;
+    u32 dfr = vlapic_read_aligned(vcpu_vlapic(v), APIC_DFR);
+
+    mod = (dfr >> 28) & 0xFu;
+
+    spin_lock(&d->avic_dfr_mode_lock);
+    if ( d->avic_dfr_mode != mod )
+    {
+        /*
+         * We assume that all local APICs are using the same type.
+         * If the DFR mode changes, we need to flush the domain's
+         * AVIC logical APIC ID table.
+         */
+        clear_domain_page(_mfn(page_to_mfn(d->avic_logical_id_table_pg)));
+        d->avic_dfr_mode = mod;
+    }
+    spin_unlock(&d->avic_dfr_mode_lock);
+
+    if ( v->arch.hvm_svm.avic_last_ldr )
+        avic_handle_ldr_update(v);
+
+    return 0;
+}
+
+static int avic_unaccel_trap_write(struct vcpu *v)
+{
+    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
+    u32 offset = vmcb->exitinfo1 & AVIC_UNACCEL_ACCESS_OFFSET_MASK;
+    u32 reg = vlapic_read_aligned(vcpu_vlapic(v), offset);
+
+    switch ( offset )
+    {
+    case APIC_ID:
+        /*
+         * Currently, we do not support APIC_ID updates while
+         * the vcpus are running, as that might require updating
+         * the AVIC max APIC ID in all VMCBs. This would require
+         * a synchronized update on all running VCPUs.
+         */
+        return X86EMUL_UNHANDLEABLE;
+
+    case APIC_LDR:
+        if ( avic_handle_ldr_update(v) )
+            return X86EMUL_UNHANDLEABLE;
+        break;
+
+    case APIC_DFR:
+        if ( avic_handle_dfr_update(v) )
+            return X86EMUL_UNHANDLEABLE;
+        break;
+
+    default:
+        break;
+    }
+
+    vlapic_reg_write(v, offset, reg);
+
+    return X86EMUL_OKAY;
+}
+
+static inline bool avic_is_trap(u32 offset)
+{
+    u32 pos = offset >> 4;
+    static const unsigned long avic_trap[] =
+        {
+#define REG(x) (1UL << (APIC_ ## x >> 4))
+            REG(ID)   | REG(EOI)     | REG(RRR)   | REG(LDR)  |
+            REG(DFR)  | REG(SPIV)    | REG(ESR)   | REG(ICR)  |
+            REG(LVTT) | REG(LVTTHMR) | REG(LVTPC) | REG(LVT0) |
+            REG(LVT1) | REG(LVTERR)  | REG(TMICT) | REG(TDCR)
+#undef REG
+        };
+
+    if ( !test_bit(pos, avic_trap) )
+        return false;
+
+    return true;
+}
+
+/*
+ * Note:
+ * This function handles the AVIC_NOACCEL #vmexit when AVIC is enabled.
+ * The hardware generates this fault when:
+ * - A guest accesses an APIC register that is not accelerated
+ *   by AVIC hardware.
+ * - An EOI is attempted when the highest-priority in-service
+ *   interrupt is level-triggered.
+ */
+void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs)
+{
+    struct vcpu *curr = current;
+    struct vmcb_struct *vmcb = curr->arch.hvm_svm.vmcb;
+    u32 offset = vmcb->exitinfo1 & AVIC_UNACCEL_ACCESS_OFFSET_MASK;
+    u32 rw = (vmcb->exitinfo1 >> 32) & 0x1;
+
+    if ( avic_is_trap(offset) )
+    {
+        /* Handling AVIC Trap (intercept right after the access). */
+        if ( !rw )
+        {
+            /*
+             * If a read trap happens, the CPU microcode does not
+             * implement the spec.
+             */
+            gprintk(XENLOG_ERR, "%s: Invalid #VMEXIT due to trap read (%#x)\n",
+                    __func__, offset);
+            domain_crash(curr->domain);
+        }
+
+        if ( avic_unaccel_trap_write(curr) != X86EMUL_OKAY )
+        {
+            gprintk(XENLOG_ERR, "%s: Failed to handle trap write (%#x)\n",
+                    __func__, offset);
+            domain_crash(curr->domain);
+        }
+    }
+    else
+        /* Handling AVIC Fault (intercept before the access). */
+        hvm_emulate_one_vm_event(EMUL_KIND_NORMAL, TRAP_invalid_op,
+                                 X86_EVENT_NO_EC);
+
+    return;
+}
+
+/*
  * Local variables:
  * mode: C
  * c-file-style: "BSD"
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b445f59ada..c47042bf6b 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2976,6 +2976,14 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
                             VM_EVENT_DESC_TR, exit_reason == VMEXIT_TR_WRITE);
         break;
 
+    case VMEXIT_AVIC_INCOMP_IPI:
+        svm_avic_vmexit_do_incomp_ipi(regs);
+        break;
+
+    case VMEXIT_AVIC_NOACCEL:
+        svm_avic_vmexit_do_noaccel(regs);
+        break;
+
     default:
     unexpected_exit_type:
         gprintk(XENLOG_ERR, "Unexpected vmexit: reason %#"PRIx64", "
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index a4472200a6..64c4e8dd93 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -592,7 +592,7 @@ static void vlapic_set_tdcr(struct vlapic *vlapic, unsigned int val)
                 "timer_divisor: %d", vlapic->hw.timer_divisor);
 }
 
-static uint32_t vlapic_read_aligned(const struct vlapic *vlapic,
+uint32_t vlapic_read_aligned(const struct vlapic *vlapic,
                                     unsigned int offset)
 {
     switch ( offset )
@@ -788,7 +788,7 @@ static void vlapic_update_timer(struct vlapic *vlapic, uint32_t lvtt,
     }
 }
 
-static void vlapic_reg_write(struct vcpu *v,
+void vlapic_reg_write(struct vcpu *v,
                              unsigned int offset, uint32_t val)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
diff --git a/xen/include/asm-x86/hvm/svm/avic.h b/xen/include/asm-x86/hvm/svm/avic.h
index 32bb9a91e8..7af850ecbd 100644
--- a/xen/include/asm-x86/hvm/svm/avic.h
+++ b/xen/include/asm-x86/hvm/svm/avic.h
@@ -33,4 +33,7 @@ void svm_avic_dom_destroy(struct domain *d);
 bool svm_avic_vcpu_enabled(const struct vcpu *v);
 int svm_avic_init_vmcb(struct vcpu *v);
 
+void svm_avic_vmexit_do_incomp_ipi(struct cpu_user_regs *regs);
+void svm_avic_vmexit_do_noaccel(struct cpu_user_regs *regs);
+
 #endif /* _SVM_AVIC_H_ */
diff --git a/xen/include/asm-x86/hvm/svm/vmcb.h b/xen/include/asm-x86/hvm/svm/vmcb.h
index 386aad2260..c6384ae197 100644
--- a/xen/include/asm-x86/hvm/svm/vmcb.h
+++ b/xen/include/asm-x86/hvm/svm/vmcb.h
@@ -301,6 +301,8 @@ enum VMEXIT_EXITCODE
     VMEXIT_MWAIT_CONDITIONAL= 140, /* 0x8c */
     VMEXIT_XSETBV           = 141, /* 0x8d */
     VMEXIT_NPF              = 1024, /* 0x400, nested paging fault */
+    VMEXIT_AVIC_INCOMP_IPI  = 1025, /* 0x401 */
+    VMEXIT_AVIC_NOACCEL     = 1026, /* 0x402 */
     VMEXIT_INVALID          = -1
 };
 
@@ -515,6 +517,9 @@ struct svm_domain {
      */
     struct avic_logical_id_entry *avic_logical_id_table;
     struct page_info *avic_logical_id_table_pg;
+
+    u32 avic_dfr_mode;
+    spinlock_t avic_dfr_mode_lock;
 };
 
 struct arch_svm_struct {
@@ -550,6 +555,7 @@ struct arch_svm_struct {
     } osvw;
 
     struct avic_physical_id_entry *avic_last_phy_id;
+    u32 avic_last_ldr;
 };
 
 struct vmcb_struct *alloc_vmcb(void);
diff --git a/xen/include/asm-x86/hvm/vlapic.h b/xen/include/asm-x86/hvm/vlapic.h
index 212c36b5c2..3a56b6db0e 100644
--- a/xen/include/asm-x86/hvm/vlapic.h
+++ b/xen/include/asm-x86/hvm/vlapic.h
@@ -137,6 +137,10 @@ void vlapic_ipi(struct vlapic *vlapic, uint32_t icr_low, uint32_t icr_high);
 
 int vlapic_apicv_write(struct vcpu *v, unsigned int offset);
 
+void vlapic_reg_write(struct vcpu *v, unsigned int offset, uint32_t val);
+
+uint32_t vlapic_read_aligned(const struct vlapic *vlapic, unsigned int offset);
+
 struct vlapic *vlapic_lowest_prio(
     struct domain *d, const struct vlapic *source,
     int short_hand, uint32_t dest, bool_t dest_mode);
-- 
2.11.0
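For readers unfamiliar with the xAPIC logical destination encoding, the
table indexing that avic_get_logical_id_entry() in the patch above
implements can be illustrated with the following minimal standalone
sketch. It is illustrative only and not part of the patch; the helper
name logical_id_index() and the test values are made up for this note.
In flat mode the 8-bit logical ID is a one-hot mask, so the index is
simply the bit position; in cluster mode bits 7:4 select one of up to
15 clusters and bits 3:0 one-hot-select an APIC within the cluster.

/* Standalone illustration (hypothetical helper, not patch code). */
#include <stdio.h>
#include <strings.h>   /* ffs() */

static int logical_id_index(unsigned int dest_id, int flat)
{
    if ( !dest_id )
        return -1;

    if ( flat )
    {
        /* Flat mode: index is the position of the (one-hot) ID bit. */
        int index = ffs(dest_id) - 1;

        return (index > 7) ? -1 : index;
    }
    else
    {
        /* Cluster mode: bits 7:4 = cluster, bits 3:0 = APIC in cluster. */
        unsigned int cluster = (dest_id & 0xf0) >> 4;
        int apic = ffs(dest_id & 0x0f) - 1;

        if ( apic < 0 || apic > 7 || cluster >= 0xf )
            return -1;

        return (cluster << 2) + apic;
    }
}

int main(void)
{
    /* Flat mode: logical ID 0x08 -> bit 3 -> index 3. */
    printf("%d\n", logical_id_index(0x08, 1));

    /* Cluster mode: ID 0x22 -> cluster 2, APIC bit 1 -> (2 << 2) + 1 = 9. */
    printf("%d\n", logical_id_index(0x22, 0));

    return 0;
}

This mirrors why the patch clears the whole logical APIC ID table in
avic_handle_dfr_update(): a DFR mode switch changes the index scheme,
so every previously computed entry becomes stale.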