[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [XEN PATCH 09/13] x86/mm: address violations of MISRA C:2012 Rule 7.3
From: Gianluca Luparini <gianluca.luparini@xxxxxxxxxxx> The xen sources contain violations of MISRA C:2012 Rule 7.3 whose headline states: "The lowercase character 'l' shall not be used in a literal suffix". Use the "L" suffix instead of the "l" suffix, to avoid potential ambiguity. If the "u" suffix is used near "L", use the "U" suffix instead, for consistency. The changes in this patch are mechanical. Signed-off-by: Gianluca Luparini <gianluca.luparini@xxxxxxxxxxx> Signed-off-by: Simone Ballarin <simone.ballarin@xxxxxxxxxxx> --- xen/arch/x86/mm/p2m-pt.c | 6 +++--- xen/arch/x86/mm/p2m.c | 20 ++++++++++---------- xen/arch/x86/mm/physmap.c | 4 ++-- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c index 6d1bb5daad..b2b14746c1 100644 --- a/xen/arch/x86/mm/p2m-pt.c +++ b/xen/arch/x86/mm/p2m-pt.c @@ -552,7 +552,7 @@ static void check_entry(mfn_t mfn, p2m_type_t new, p2m_type_t old, if ( new == p2m_mmio_direct ) ASSERT(!mfn_eq(mfn, INVALID_MFN) && !rangeset_overlaps_range(mmio_ro_ranges, mfn_x(mfn), - mfn_x(mfn) + (1ul << order))); + mfn_x(mfn) + (1UL << order))); else if ( p2m_allows_invalid_mfn(new) || new == p2m_invalid || new == p2m_mmio_dm ) ASSERT(mfn_valid(mfn) || mfn_eq(mfn, INVALID_MFN)); @@ -745,9 +745,9 @@ p2m_pt_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn, if ( need_iommu_pt_sync(p2m->domain) && (iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) ) rc = iommu_pte_flags - ? iommu_legacy_map(d, _dfn(gfn), mfn, 1ul << page_order, + ? iommu_legacy_map(d, _dfn(gfn), mfn, 1UL << page_order, iommu_pte_flags) - : iommu_legacy_unmap(d, _dfn(gfn), 1ul << page_order); + : iommu_legacy_unmap(d, _dfn(gfn), 1UL << page_order); /* * Free old intermediate tables if necessary. 
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index f6df35767a..0983bd71d9 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -392,7 +392,7 @@ int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn, unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma) { bool hap = hap_enabled(p2m->domain); - unsigned long todo = 1ul << page_order; + unsigned long todo = 1UL << page_order; int set_rc, rc = 0; ASSERT(gfn_locked_by_me(p2m, gfn)); @@ -401,10 +401,10 @@ int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn, { unsigned long fn_mask = (!mfn_eq(mfn, INVALID_MFN) ? mfn_x(mfn) : 0) | gfn_x(gfn) | todo; - unsigned int order = (!(fn_mask & ((1ul << PAGE_ORDER_1G) - 1)) && + unsigned int order = (!(fn_mask & ((1UL << PAGE_ORDER_1G) - 1)) && hap && hap_has_1gb) ? PAGE_ORDER_1G - : (!(fn_mask & ((1ul << PAGE_ORDER_2M) - 1)) && + : (!(fn_mask & ((1UL << PAGE_ORDER_2M) - 1)) && (!hap || hap_has_2mb)) ? PAGE_ORDER_2M : PAGE_ORDER_4K; @@ -412,10 +412,10 @@ if ( set_rc ) rc = set_rc; - gfn = gfn_add(gfn, 1ul << order); + gfn = gfn_add(gfn, 1UL << order); if ( !mfn_eq(mfn, INVALID_MFN) ) - mfn = mfn_add(mfn, 1ul << order); - todo -= 1ul << order; + mfn = mfn_add(mfn, 1UL << order); + todo -= 1UL << order; } return rc; @@ -1407,7 +1407,7 @@ void np2m_flush_base(struct vcpu *v, unsigned long np2m_base) struct p2m_domain *p2m; unsigned int i; - np2m_base &= ~(0xfffull); + np2m_base &= ~(0xfffULL); nestedp2m_lock(d); for ( i = 0; i < MAX_NESTEDP2M; i++ ) @@ -1456,7 +1456,7 @@ p2m_get_nestedp2m_locked(struct vcpu *v) bool needs_flush = true; /* Mask out low bits; this avoids collisions with P2M_BASE_EADDR */ - np2m_base &= ~(0xfffull); + np2m_base &= ~(0xfffULL); if (nv->nv_flushp2m && nv->nv_p2m) { nv->nv_p2m = NULL; @@ -1614,8 +1614,8 @@ unsigned long paging_gva_to_gfn(struct vcpu *v, * Sanity check that l1_gfn can be used properly as a 4K mapping, even * 
if it mapped by a nested superpage. */ - ASSERT((l2_gfn & ((1ul << l1_page_order) - 1)) == - (l1_gfn & ((1ul << l1_page_order) - 1))); + ASSERT((l2_gfn & ((1UL << l1_page_order) - 1)) == + (l1_gfn & ((1UL << l1_page_order) - 1))); return l1_gfn; } diff --git a/xen/arch/x86/mm/physmap.c b/xen/arch/x86/mm/physmap.c index f1695e456e..098ccdf541 100644 --- a/xen/arch/x86/mm/physmap.c +++ b/xen/arch/x86/mm/physmap.c @@ -72,7 +72,7 @@ int set_identity_p2m_entry(struct domain *d, unsigned long gfn, if ( !is_iommu_enabled(d) ) return 0; return iommu_legacy_map(d, _dfn(gfn), _mfn(gfn), - 1ul << PAGE_ORDER_4K, + 1UL << PAGE_ORDER_4K, p2m_access_to_iommu_flags(p2ma)); } @@ -85,7 +85,7 @@ int clear_identity_p2m_entry(struct domain *d, unsigned long gfn) { if ( !is_iommu_enabled(d) ) return 0; - return iommu_legacy_unmap(d, _dfn(gfn), 1ul << PAGE_ORDER_4K); + return iommu_legacy_unmap(d, _dfn(gfn), 1UL << PAGE_ORDER_4K); } return p2m_remove_identity_entry(d, gfn); -- 2.34.1
Lists.xenproject.org is hosted with RackSpace, monitoring our servers 24x7x365 and backed by RackSpace's Fanatical Support®.