[xen stable-4.14] x86/HVM: serialize pinned cache attribute list manipulation
commit 99e9afaeb00b5a00d6fc3e2d2e593f9ed35695d5
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Tue Mar 21 12:01:01 2023 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Mar 21 12:07:44 2023 +0000

    x86/HVM: serialize pinned cache attribute list manipulation

    While the RCU variants of list insertion and removal allow lockless
    list traversal (with RCU just read-locked), insertions and removals
    still need serializing amongst themselves. To keep things simple,
    use the domain lock for this purpose.

    This is CVE-2022-42334 / part of XSA-428.

    Fixes: 642123c5123f ("x86/hvm: provide XEN_DMOP_pin_memory_cacheattr")
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Reviewed-by: Julien Grall <jgrall@xxxxxxxxxx>
    (cherry picked from commit 829ec245cf66560e3b50d140ccb3168e7fb7c945)
---
 xen/arch/x86/hvm/mtrr.c | 51 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 752524684e..97ddc2075e 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -595,7 +595,7 @@ static void free_pinned_cacheattr_entry(struct rcu_head *rcu)
 int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
                                  uint64_t gfn_end, uint32_t type)
 {
-    struct hvm_mem_pinned_cacheattr_range *range;
+    struct hvm_mem_pinned_cacheattr_range *range, *newr;
     unsigned int nr = 0;
     int rc = 1;

@@ -609,14 +609,15 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     {
     case XEN_DOMCTL_DELETE_MEM_CACHEATTR:
         /* Remove the requested range. */
-        rcu_read_lock(&pinned_cacheattr_rcu_lock);
-        list_for_each_entry_rcu ( range,
-                                  &d->arch.hvm.pinned_cacheattr_ranges,
-                                  list )
+        domain_lock(d);
+        list_for_each_entry ( range,
+                              &d->arch.hvm.pinned_cacheattr_ranges,
+                              list )
             if ( range->start == gfn_start && range->end == gfn_end )
             {
-                rcu_read_unlock(&pinned_cacheattr_rcu_lock);
                 list_del_rcu(&range->list);
+                domain_unlock(d);
+
                 type = range->type;
                 call_rcu(&range->rcu, free_pinned_cacheattr_entry);
                 p2m_memory_type_changed(d);
@@ -637,7 +638,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
             }
             return 0;
         }
-        rcu_read_unlock(&pinned_cacheattr_rcu_lock);
+        domain_unlock(d);
         return -ENOENT;

     case PAT_TYPE_UC_MINUS:
@@ -652,7 +653,10 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
         return -EINVAL;
     }

-    rcu_read_lock(&pinned_cacheattr_rcu_lock);
+    newr = xzalloc(struct hvm_mem_pinned_cacheattr_range);
+
+    domain_lock(d);
+
     list_for_each_entry_rcu ( range,
                               &d->arch.hvm.pinned_cacheattr_ranges,
                               list )
@@ -670,27 +674,34 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
         }
         ++nr;
     }
-    rcu_read_unlock(&pinned_cacheattr_rcu_lock);
+
     if ( rc <= 0 )
-        return rc;
+        /* nothing */;
+    else if ( nr >= 64 /* The limit is arbitrary. */ )
+        rc = -ENOSPC;
+    else if ( !newr )
+        rc = -ENOMEM;
+    else
+    {
+        newr->start = gfn_start;
+        newr->end = gfn_end;
+        newr->type = type;

-    if ( nr >= 64 /* The limit is arbitrary. */ )
-        return -ENOSPC;
+        list_add_rcu(&newr->list, &d->arch.hvm.pinned_cacheattr_ranges);

-    range = xzalloc(struct hvm_mem_pinned_cacheattr_range);
-    if ( range == NULL )
-        return -ENOMEM;
+        newr = NULL;
+        rc = 0;
+    }
+
+    domain_unlock(d);

-    range->start = gfn_start;
-    range->end = gfn_end;
-    range->type = type;
+    xfree(newr);

-    list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
     p2m_memory_type_changed(d);

     if ( type != PAT_TYPE_WRBACK )
         flush_all(FLUSH_CACHE);

-    return 0;
+    return rc;
 }

 static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.14
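
For readers less familiar with the pattern being enforced here: RCU list
primitives let readers traverse concurrently with a single writer, but
nothing in list_add_rcu()/list_del_rcu() serializes writers against each
other; that is what the domain lock now provides. The userspace C sketch
below models just that writer-side discipline. It is illustrative only,
not Xen code: pin_range() and unpin_range() are hypothetical stand-ins
for the insert and delete paths of hvm_set_mem_pinned_cacheattr(), a
pthread mutex stands in for domain_lock(), and RCU publication and
reclamation (list_add_rcu(), call_rcu()) are reduced to comments.

/*
 * Writer-side serialization for an RCU-style list, modeled in plain
 * userspace C.  Hypothetical sketch, not Xen code.  Build with:
 *   gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
    struct range *next;
    uint64_t start, end;
};

static struct range *head;
static pthread_mutex_t writer_lock = PTHREAD_MUTEX_INITIALIZER;

/* Insert: allocate outside the lock, link while holding it (the
 * patch likewise moves xzalloc() ahead of domain_lock()). */
static int pin_range(uint64_t start, uint64_t end)
{
    struct range *newr = calloc(1, sizeof(*newr));

    if ( !newr )
        return -1;                      /* -ENOMEM in the patch */

    newr->start = start;
    newr->end = end;

    pthread_mutex_lock(&writer_lock);
    newr->next = head;
    head = newr;        /* list_add_rcu() would also publish with a barrier */
    pthread_mutex_unlock(&writer_lock);

    return 0;
}

/* Delete: unlink under the lock; freeing must wait for readers to
 * drain (call_rcu() in the real code; leaked here for brevity). */
static int unpin_range(uint64_t start, uint64_t end)
{
    struct range **pp, *r;

    pthread_mutex_lock(&writer_lock);
    for ( pp = &head; (r = *pp) != NULL; pp = &r->next )
        if ( r->start == start && r->end == end )
        {
            *pp = r->next;              /* list_del_rcu() analogue */
            pthread_mutex_unlock(&writer_lock);
            /* Real code: call_rcu(&r->rcu, free_pinned_cacheattr_entry); */
            return 0;
        }
    pthread_mutex_unlock(&writer_lock);

    return -1;                          /* -ENOENT in the patch */
}

int main(void)
{
    pin_range(0x1000, 0x1fff);
    pin_range(0x2000, 0x2fff);
    printf("unpin existing: %d\n", unpin_range(0x1000, 0x1fff));
    printf("unpin missing:  %d\n", unpin_range(0x3000, 0x3fff));
    return 0;
}

The sketch keeps only the lock-scope idea; the patch goes one step
further by allocating newr before taking the lock and xfree()ing an
unused allocation after dropping it, so the critical section covers
nothing but the list walk and the pointer updates.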