diff -r d1677c0438fb -r 5458a9862db2 xen/arch/x86/hvm/asid.c
--- a/xen/arch/x86/hvm/asid.c
+++ b/xen/arch/x86/hvm/asid.c
@@ -48,9 +48,9 @@
 /* Per-CPU ASID management. */
 struct hvm_asid_data {
-    u64 core_asid_generation;
-    u32 next_asid;
-    u32 max_asid;
+    uint64_t core_asid_generation;
+    uint32_t next_asid;
+    uint32_t max_asid;
     bool_t disabled;
 };
 
@@ -58,7 +58,7 @@ static DEFINE_PER_CPU(struct hvm_asid_da
 
 void hvm_asid_init(int nasids)
 {
-    static s8 g_disabled = -1;
+    static int8_t g_disabled = -1;
     struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
 
     data->max_asid = nasids - 1;
diff -r d1677c0438fb -r 5458a9862db2 xen/include/asm-x86/hvm/svm/asid.h
--- a/xen/include/asm-x86/hvm/svm/asid.h
+++ b/xen/include/asm-x86/hvm/svm/asid.h
@@ -34,10 +34,7 @@ static inline void svm_asid_g_invlpg(str
 {
 #if 0
     /* Optimization? */
-    asm volatile (".byte 0x0F,0x01,0xDF \n"
-                  : /* output */
-                  : /* input */
-                  "a" (g_vaddr), "c"(v->arch.hvm_svm.vmcb->guest_asid) );
+    svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
 #endif
 
     /* Safe fallback. Take a new ASID. */
diff -r d1677c0438fb -r 5458a9862db2 xen/include/asm-x86/hvm/svm/svm.h
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -60,6 +60,15 @@ static inline void svm_vmsave(void *vmcb
         : : "a" (__pa(vmcb)) : "memory" );
 }
 
+static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+{
+    asm volatile (
+        ".byte 0x0f,0x01,0xdf"
+        : /* output */
+        : /* input */
+        "a" (vaddr), "c" (asid));
+}
+
 unsigned long *svm_msrbit(unsigned long *msr_bitmap, uint32_t msr);
 void __update_guest_eip(struct cpu_user_regs *regs, unsigned int inst_len);
 
diff -r d1677c0438fb -r 5458a9862db2 xen/include/asm-x86/hvm/vcpu.h
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -100,8 +100,8 @@ struct hvm_vcpu {
     bool_t hcall_preempted;
    bool_t hcall_64bit;
 
-    u64 asid_generation;
-    u32 asid;
+    uint64_t asid_generation;
+    uint32_t asid;
 
     u32 msr_tsc_aux;
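
For context, the new svm_invlpga() helper added to svm.h wraps AMD's INVLPGA
instruction: the raw bytes 0x0f,0x01,0xdf are its opcode, emitted directly
presumably because assemblers of the time lacked the mnemonic. INVLPGA
invalidates the TLB mapping for the linear address passed in rAX, scoped to
the ASID passed in ECX, so a single guest address can be flushed without
retiring the vCPU's whole ASID. Below is a minimal sketch of a caller,
mirroring the (still #if 0'd) fast path in svm_asid_g_invlpg() above; the
wrapper name svm_flush_guest_va is hypothetical, not part of this patch:

/* Hypothetical wrapper, sketching how the new helper would be used:
 * flush one guest linear address under this vCPU's current ASID
 * instead of falling back to allocating a fresh ASID. */
static inline void svm_flush_guest_va(struct vcpu *v, unsigned long g_vaddr)
{
    /* INVLPGA takes rAX = linear address, ECX = ASID to flush within. */
    svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
}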