[XEN PATCH] x86/guest_walk: address violations of MISRA C:2012 Rule 8.3

Make the declaration and definition of guest_walk_tables() uniform by using
the parameter name "pfec_walk": this name highlights the connection with the
PFEC_* constants and is consistent with the use of the parameter within the
function body.

No functional change.

Signed-off-by: Federico Serafini <federico.serafini@xxxxxxxxxxx>
---
 xen/arch/x86/include/asm/guest_pt.h |  2 +-
 xen/arch/x86/mm/guest_walk.c        | 39 ++++++++++++++++++-------------------
 2 files changed, 21 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/include/asm/guest_pt.h b/xen/arch/x86/include/asm/guest_pt.h
index bc312343cd..5edf687dce 100644
--- a/xen/arch/x86/include/asm/guest_pt.h
+++ b/xen/arch/x86/include/asm/guest_pt.h
@@ -422,7 +422,7 @@ static inline unsigned int guest_walk_to_page_order(const walk_t *gw)
 
 bool
 guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
-                  unsigned long va, walk_t *gw, uint32_t pfec,
+                  unsigned long va, walk_t *gw, uint32_t pfec_walk,
                   gfn_t top_gfn, mfn_t top_mfn, void *top_map);
 
 /* Pretty-print the contents of a guest-walk */
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index fe7393334f..20a19bd7e2 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -69,7 +69,7 @@ static bool set_ad_bits(guest_intpte_t *guest_p, guest_intpte_t *walk_p,
  */
 bool
 guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
-                  unsigned long va, walk_t *gw, uint32_t walk,
+                  unsigned long va, walk_t *gw, uint32_t pfec_walk,
                   gfn_t top_gfn, mfn_t top_mfn, void *top_map)
 {
     struct domain *d = v->domain;
@@ -100,16 +100,17 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
      * inputs to a guest walk, but a whole load of code currently passes in
      * other PFEC_ constants.
      */
-    walk &= (PFEC_implicit | PFEC_insn_fetch | PFEC_user_mode | PFEC_write_access);
+    pfec_walk &= (PFEC_implicit | PFEC_insn_fetch | PFEC_user_mode |
+                  PFEC_write_access);
 
     /* Only implicit supervisor data accesses exist. */
-    ASSERT(!(walk & PFEC_implicit) ||
-           !(walk & (PFEC_insn_fetch | PFEC_user_mode)));
+    ASSERT(!(pfec_walk & PFEC_implicit) ||
+           !(pfec_walk & (PFEC_insn_fetch | PFEC_user_mode)));
 
     perfc_incr(guest_walk);
     memset(gw, 0, sizeof(*gw));
     gw->va = va;
-    gw->pfec = walk & (PFEC_user_mode | PFEC_write_access);
+    gw->pfec = pfec_walk & (PFEC_user_mode | PFEC_write_access);
 
     /*
      * PFEC_insn_fetch is only reported if NX or SMEP are enabled.  Hardware
@@ -117,7 +118,7 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
      * rights.
      */
     if ( guest_nx_enabled(v) || guest_smep_enabled(v) )
-        gw->pfec |= (walk & PFEC_insn_fetch);
+        gw->pfec |= (pfec_walk & PFEC_insn_fetch);
 
 #if GUEST_PAGING_LEVELS >= 3 /* PAE or 64... */
 #if GUEST_PAGING_LEVELS >= 4 /* 64-bit only... */
@@ -399,7 +400,7 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
      * N.B. In the case that the walk ended with a superpage, the fabricated
      * gw->l1e contains the appropriate leaf pkey.
      */
-    if ( !(walk & PFEC_insn_fetch) &&
+    if ( !(pfec_walk & PFEC_insn_fetch) &&
          ((ar & _PAGE_USER) ?
           guest_pku_enabled(v) : guest_pks_enabled(v)) )
     {
@@ -408,8 +409,8 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
         unsigned int pk_ar = (pkr >> (pkey * PKEY_WIDTH)) & (PKEY_AD | PKEY_WD);
 
         if ( (pk_ar & PKEY_AD) ||
-             ((walk & PFEC_write_access) && (pk_ar & PKEY_WD) &&
-              ((walk & PFEC_user_mode) || guest_wp_enabled(v))) )
+             ((pfec_walk & PFEC_write_access) && (pk_ar & PKEY_WD) &&
+              ((pfec_walk & PFEC_user_mode) || guest_wp_enabled(v))) )
         {
             gw->pfec |= PFEC_prot_key;
             goto out;
@@ -417,17 +418,17 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
     }
 #endif
 
-    if ( (walk & PFEC_insn_fetch) && (ar & _PAGE_NX_BIT) )
+    if ( (pfec_walk & PFEC_insn_fetch) && (ar & _PAGE_NX_BIT) )
         /* Requested an instruction fetch and found NX? Fail. */
         goto out;
 
-    if ( walk & PFEC_user_mode ) /* Requested a user acess. */
+    if ( pfec_walk & PFEC_user_mode ) /* Requested a user acess. */
     {
         if ( !(ar & _PAGE_USER) )
             /* Got a supervisor walk? Unconditional fail. */
             goto out;
 
-        if ( (walk & PFEC_write_access) && !(ar & _PAGE_RW) )
+        if ( (pfec_walk & PFEC_write_access) && !(ar & _PAGE_RW) )
             /* Requested a write and only got a read? Fail. */
             goto out;
     }
@@ -435,18 +436,18 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
     {
         if ( ar & _PAGE_USER ) /* Got a user walk. */
         {
-            if ( (walk & PFEC_insn_fetch) && guest_smep_enabled(v) )
+            if ( (pfec_walk & PFEC_insn_fetch) && guest_smep_enabled(v) )
                 /* User insn fetch and smep? Fail. */
                 goto out;
 
-            if ( !(walk & PFEC_insn_fetch) && guest_smap_enabled(v) &&
-                 ((walk & PFEC_implicit) ||
+            if ( !(pfec_walk & PFEC_insn_fetch) && guest_smap_enabled(v) &&
+                 ((pfec_walk & PFEC_implicit) ||
                   !(guest_cpu_user_regs()->eflags & X86_EFLAGS_AC)) )
                 /* User data access and smap? Fail. */
                 goto out;
         }
 
-        if ( (walk & PFEC_write_access) && !(ar & _PAGE_RW) &&
+        if ( (pfec_walk & PFEC_write_access) && !(ar & _PAGE_RW) &&
              guest_wp_enabled(v) )
             /* Requested a write, got a read, and CR0.WP is set? Fail. */
             goto out;
@@ -468,7 +469,7 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
 
     case 1:
         if ( set_ad_bits(&l1p[guest_l1_table_offset(va)].l1, &gw->l1e.l1,
-                         (walk & PFEC_write_access)) )
+                         (pfec_walk & PFEC_write_access)) )
         {
             paging_mark_dirty(d, gw->l1mfn);
             hvmemul_write_cache(v, l1gpa, &gw->l1e, sizeof(gw->l1e));
@@ -476,7 +477,7 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
         /* Fallthrough */
     case 2:
         if ( set_ad_bits(&l2p[guest_l2_table_offset(va)].l2, &gw->l2e.l2,
-                         (walk & PFEC_write_access) && leaf_level == 2) )
+                         (pfec_walk & PFEC_write_access) && leaf_level == 2) )
        {
             paging_mark_dirty(d, gw->l2mfn);
             hvmemul_write_cache(v, l2gpa, &gw->l2e, sizeof(gw->l2e));
@@ -485,7 +486,7 @@ guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,
 #if GUEST_PAGING_LEVELS == 4 /* 64-bit only... */
     case 3:
         if ( set_ad_bits(&l3p[guest_l3_table_offset(va)].l3, &gw->l3e.l3,
-                         (walk & PFEC_write_access) && leaf_level == 3) )
+                         (pfec_walk & PFEC_write_access) && leaf_level == 3) )
         {
             paging_mark_dirty(d, gw->l3mfn);
             hvmemul_write_cache(v, l3gpa, &gw->l3e, sizeof(gw->l3e));
-- 
2.34.1
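
For readers unfamiliar with the rule: MISRA C:2012 Rule 8.3 requires that all
declarations of a function use the same parameter names and type qualifiers.
Below is a minimal sketch of the kind of mismatch the rule flags and the fix
this patch applies to guest_walk_tables(); the scale()/scale_ok() functions
are hypothetical and not Xen code.

    #include <stdio.h>

    /* Non-compliant with Rule 8.3: the prototype and the definition
     * disagree on parameter names (this is legal C, but flagged). */
    long scale(long value, long factor);    /* declaration */
    long scale(long val, long mult)         /* definition uses other names */
    {
        return val * mult;
    }

    /* Compliant: declaration and definition use the same names, which is
     * what the patch does for guest_walk_tables() by settling on the
     * single name "pfec_walk" in both the header and the definition. */
    long scale_ok(long value, long factor); /* declaration */
    long scale_ok(long value, long factor)  /* definition */
    {
        return value * factor;
    }

    int main(void)
    {
        printf("%ld %ld\n", scale(6, 7), scale_ok(6, 7));
        return 0;
    }

A static analyzer checking Rule 8.3 reports the first pair; renaming either
side so that both declarations match resolves the finding, with no change to
generated code.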