x86/mm: fix checks against max_mapped_pfn

This value is an inclusive one, i.e. this fixes an off-by-one in memory
sharing and an off-by-two in shadow code.

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1267,8 +1267,8 @@ int relinquish_shared_pages(struct domai
         return 0;
 
     p2m_lock(p2m);
-    for (gfn = p2m->next_shared_gfn_to_relinquish;
-         gfn < p2m->max_mapped_pfn; gfn++ )
+    for ( gfn = p2m->next_shared_gfn_to_relinquish;
+          gfn <= p2m->max_mapped_pfn; gfn++ )
     {
         p2m_access_t a;
         p2m_type_t t;
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3487,9 +3487,7 @@ int shadow_track_dirty_vram(struct domai
     struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
-    if (end_pfn < begin_pfn
-        || begin_pfn > p2m->max_mapped_pfn
-        || end_pfn >= p2m->max_mapped_pfn)
+    if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
         return -EINVAL;
 
     /* We perform p2m lookups, so lock the p2m upfront to avoid deadlock */
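
For readers tracing the arithmetic, below is a minimal standalone C sketch
(not part of the patch; all names are hypothetical stand-ins for the p2m
fields above) of why an inclusive maximum needs "<=" in a loop and permits
an exclusive end of max + 1 in a range check:

#include <stdio.h>

int main(void)
{
    /* Hypothetical stand-in for p2m->max_mapped_pfn: an INCLUSIVE
     * bound, i.e. pfns 0..max_pfn are all valid. */
    unsigned long max_pfn = 4;
    unsigned long pfn, begin_pfn = 2, end_pfn = 5;

    /* Visiting every valid pfn requires '<='; the pre-patch '<'
     * would skip pfn == max_pfn (the off-by-one in mem_sharing.c). */
    for ( pfn = 0; pfn <= max_pfn; pfn++ )
        printf("pfn %lu\n", pfn);

    /* Range check for a half-open [begin_pfn, end_pfn) request: the
     * exclusive end may legitimately equal max_pfn + 1.  The pre-patch
     * 'end_pfn >= max_pfn' rejected two valid values of end_pfn
     * (the off-by-two in shadow/common.c). */
    if ( end_pfn < begin_pfn || end_pfn > max_pfn + 1 )
        puts("-EINVAL");
    else
        puts("range accepted");

    return 0;
}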