x86: #PF error code adjustments

Add a definition for the (for now unused) protection key related error
code bit, moving our own custom ones out of the way.

In the course of checking the uses of the latter I realized that while
right now they can only get set on their own, callers had better not
depend on that property and should check just for the bit rather than
matching the entire value.

Signed-off-by: Jan Beulich
---
RFC because I noticed that nothing seems to ever set PFEC_page_paged,
so I wonder whether we really need that flag.

It also seems to me that the part of paging_gva_to_gfn() dealing with
the nested case can't be quite right: Neither is there any check after
mode->gva_to_gfn() (namely INVALID_GFN being returned gets ignored),
nor does the handling of the two involved error code values seem
reasonable. One of the many reasons why nested HVM can't be expected
to reach "supported" state any time soon, I guess.

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -447,7 +447,7 @@ static int hvmemul_linear_to_phys(
     }
     else if ( (pfn = paging_gva_to_gfn(curr, addr, &pfec)) == INVALID_GFN )
     {
-        if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
+        if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
             return X86EMUL_RETRY;
         hvm_inject_page_fault(pfec, addr);
         return X86EMUL_EXCEPTION;
@@ -464,7 +464,7 @@ static int hvmemul_linear_to_phys(
         /* Is it contiguous with the preceding PFNs? If not then we're done. */
         if ( (npfn == INVALID_GFN) || (npfn != (pfn + (reverse ? -i : i))) )
         {
-            if ( pfec == PFEC_page_paged || pfec == PFEC_page_shared )
+            if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
                 return X86EMUL_RETRY;
             done /= bytes_per_rep;
             if ( done == 0 )
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3821,7 +3821,7 @@ static void *hvm_map_entry(unsigned long
      */
     pfec = PFEC_page_present;
     gfn = paging_gva_to_gfn(current, va, &pfec);
-    if ( (pfec == PFEC_page_paged) || (pfec == PFEC_page_shared) )
+    if ( pfec & (PFEC_page_paged | PFEC_page_shared) )
         goto fail;
 
     v = hvm_map_guest_frame_rw(gfn, 0, writable);
@@ -4212,9 +4212,9 @@ static enum hvm_copy_result __hvm_copy(
             gfn = paging_gva_to_gfn(curr, addr, &pfec);
             if ( gfn == INVALID_GFN )
             {
-                if ( pfec == PFEC_page_paged )
+                if ( pfec & PFEC_page_paged )
                     return HVMCOPY_gfn_paged_out;
-                if ( pfec == PFEC_page_shared )
+                if ( pfec & PFEC_page_shared )
                     return HVMCOPY_gfn_shared;
                 if ( flags & HVMCOPY_fault )
                     hvm_inject_page_fault(pfec, addr);
@@ -4327,9 +4327,9 @@ static enum hvm_copy_result __hvm_clear(
         gfn = paging_gva_to_gfn(curr, addr, &pfec);
         if ( gfn == INVALID_GFN )
         {
-            if ( pfec == PFEC_page_paged )
+            if ( pfec & PFEC_page_paged )
                 return HVMCOPY_gfn_paged_out;
-            if ( pfec == PFEC_page_shared )
+            if ( pfec & PFEC_page_shared )
                 return HVMCOPY_gfn_shared;
             return HVMCOPY_bad_gva_to_gfn;
         }
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -140,8 +140,10 @@
 #define PFEC_user_mode      (1U<<2)
 #define PFEC_reserved_bit   (1U<<3)
 #define PFEC_insn_fetch     (1U<<4)
-#define PFEC_page_paged     (1U<<5)
-#define PFEC_page_shared    (1U<<6)
+#define PFEC_prot_key       (1U<<5)
+/* Internally used only flags. */
+#define PFEC_page_paged     (1U<<16)
+#define PFEC_page_shared    (1U<<17)
 
 /* Other exception error code values. */
 #define X86_XEC_EXT         (_AC(1,U) << 0)
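
For reference, below is a minimal standalone sketch (not part of the patch,
and the file name is just a placeholder) of why testing the individual bit
is preferable to matching the entire error code: as soon as an architectural
bit such as PFEC_write_access were ever set alongside one of the internal
flags, an exact comparison would silently stop matching, while the bit test
keeps working. The PFEC_* values simply mirror the post-patch definitions in
xen/include/asm-x86/processor.h.

/*
 * Illustrative only; compile standalone, e.g.:
 *   gcc -Wall -o pfec-demo pfec-demo.c && ./pfec-demo
 */
#include <stdio.h>

#define PFEC_write_access (1U << 1)
#define PFEC_page_paged   (1U << 16)
#define PFEC_page_shared  (1U << 17)

int main(void)
{
    /* Hypothetical: a paged-out page hit by a write, two bits set at once. */
    unsigned int pfec = PFEC_page_paged | PFEC_write_access;

    /* Old style: exact match misses the combined value (prints 0). */
    printf("pfec == PFEC_page_paged                     -> %d\n",
           pfec == PFEC_page_paged);

    /* New style: testing the bit keeps working (prints 1). */
    printf("pfec & PFEC_page_paged                      -> %d\n",
           !!(pfec & PFEC_page_paged));
    printf("pfec & (PFEC_page_paged | PFEC_page_shared) -> %d\n",
           !!(pfec & (PFEC_page_paged | PFEC_page_shared)));

    return 0;
}

The combined value is hypothetical today (as the description notes, the
internal flags currently only get set on their own), but the bit-wise check
stays correct should that ever change.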