[Xen-devel] [PATCH 1/2] x86/mem_sharing: reorder when pages are unlocked and released
Patch 0502e0adae2 "x86: correct instances of PGC_allocated clearing" introduced
grabbing extra references for pages that drop references tied to PGC_allocated.
However, the way these extra references were grabbed were incorrect, resulting
in both share_pages and unshare_pages failing. There actually is no need to
grab extra references, only a reordering was needed for when the existing
references are being released. This is in accordance to the XSA-242
recommendation of not calling _put_page_type while also holding the page_lock
for that page.
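
To illustrate the pattern the patch switches to (count the pending puts
while the page lock is held, then drop them only after unlocking), here is
a minimal, self-contained C sketch. All names below (struct page,
page_lock(), page_unlock(), put_page_and_type_stub(), merge_rmap_lists())
are stand-in stubs invented for this example, not the real Xen interfaces:

#include <assert.h>

struct page { int locked; unsigned long type_refs; };

static void page_lock(struct page *pg)   { assert(!pg->locked); pg->locked = 1; }
static void page_unlock(struct page *pg) { assert(pg->locked);  pg->locked = 0; }

/* Stand-in for put_page_and_type(); per the XSA-242 recommendation it
 * must not run while the page lock for this page is held. */
static void put_page_and_type_stub(struct page *pg)
{
    assert(!pg->locked);      /* the invariant the reordering preserves */
    pg->type_refs--;
}

static void merge_rmap_lists(struct page *cpage, unsigned int nr_entries)
{
    unsigned long put_count = 0;

    page_lock(cpage);
    while ( nr_entries-- )
        put_count++;          /* defer: only count the pending puts */
    page_unlock(cpage);

    while ( put_count-- )     /* lock dropped, now safe to put */
        put_page_and_type_stub(cpage);
}

int main(void)
{
    struct page cpage = { 0, 3 };

    merge_rmap_lists(&cpage, 3);
    assert(cpage.type_refs == 0);
    return 0;
}

The assert in put_page_and_type_stub() encodes the ordering constraint
that the reordering in this patch exists to preserve.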
Signed-off-by: Tamas K Lengyel <tamas@xxxxxxxxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Wei Liu <wei.liu2@xxxxxxxxxx>
Cc: Roger Pau Monne <roger.pau@xxxxxxxxxx>
---
xen/arch/x86/mm/mem_sharing.c | 31 ++++++++-----------------------
1 file changed, 8 insertions(+), 23 deletions(-)
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 5ac9d8f54c..345a1778f9 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -900,6 +900,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     p2m_type_t smfn_type, cmfn_type;
     struct two_gfns tg;
     struct rmap_iterator ri;
+    unsigned long put_count = 0;
 
     get_two_gfns(sd, sgfn, &smfn_type, NULL, &smfn,
                  cd, cgfn, &cmfn_type, NULL, &cmfn, 0, &tg);
@@ -964,15 +965,6 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
         goto err_out;
     }
 
-    /* Acquire an extra reference, for the freeing below to be safe. */
-    if ( !get_page(cpage, cd) )
-    {
-        ret = -EOVERFLOW;
-        mem_sharing_page_unlock(secondpg);
-        mem_sharing_page_unlock(firstpg);
-        goto err_out;
-    }
-
     /* Merge the lists together */
     rmap_seed_iterator(cpage, &ri);
     while ( (gfn = rmap_iterate(cpage, &ri)) != NULL)
@@ -984,7 +976,7 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
          * Don't change the type of rmap for the client page. */
         rmap_del(gfn, cpage, 0);
         rmap_add(gfn, spage);
-        put_page_and_type(cpage);
+        put_count++;
         d = get_domain_by_id(gfn->domain);
         BUG_ON(!d);
         BUG_ON(set_shared_p2m_entry(d, gfn->gfn, smfn));
@@ -1002,7 +994,9 @@ static int share_pages(struct domain *sd, gfn_t sgfn, shr_handle_t sh,
     /* Free the client page */
     if(test_and_clear_bit(_PGC_allocated, &cpage->count_info))
         put_page(cpage);
-    put_page(cpage);
+
+    while(put_count--)
+        put_page_and_type(cpage);
 
     /* We managed to free a domain page. */
     atomic_dec(&nr_shared_mfns);
@@ -1167,20 +1161,11 @@ int __mem_sharing_unshare_page(struct domain *d,
     {
         if ( !last_gfn )
             mem_sharing_gfn_destroy(page, d, gfn_info);
-        put_page_and_type(page);
         mem_sharing_page_unlock(page);
-        if ( last_gfn )
-        {
-            if ( !get_page(page, d) )
-            {
-                put_gfn(d, gfn);
-                domain_crash(d);
-                return -EOVERFLOW;
-            }
-            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
-                put_page(page);
+        if ( last_gfn &&
+             test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);
-        }
+        put_page_and_type(page);
         put_gfn(d, gfn);
 
         return 0;
--
2.20.1