[Xen-devel] [PATCH v6 04/10] xen: Introduce XENMEM_soft_reset operation
The new operation reassigns all memory pages from the source domain to the
destination domain, mapping them at exactly the same GFNs. Pages mapped more
than once (e.g. grants) are copied.

Signed-off-by: Vitaly Kuznetsov <vkuznets@xxxxxxxxxx>
---
 xen/common/memory.c         | 232 ++++++++++++++++++++++++++++++++++++++++++++
 xen/include/public/memory.h |  34 ++++++-
 2 files changed, 265 insertions(+), 1 deletion(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 063a1c5..385218c 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -580,6 +580,234 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
     return rc;
 }
 
+static long memory_soft_reset(XEN_GUEST_HANDLE_PARAM(xen_memory_soft_reset_t) arg)
+{
+    long rc = 0;
+    struct xen_memory_soft_reset softr;
+    struct domain *source_d, *dest_d;
+    unsigned long mfn, mfn_new, gmfn, last_gmfn, count;
+    unsigned int order;
+    p2m_type_t p2mt;
+    struct page_info *page, *new_page, *next_page;
+    int drop_dom_ref;
+
+    if ( copy_from_guest(&softr, arg, 1) )
+        return -EFAULT;
+
+    if ( softr.source_domid == softr.dest_domid )
+        return -EINVAL;
+
+    source_d = rcu_lock_domain_by_any_id(softr.source_domid);
+    if ( source_d == NULL )
+    {
+        rc = -ESRCH;
+        goto fail_early;
+    }
+
+    dest_d = rcu_lock_domain_by_any_id(softr.dest_domid);
+    if ( dest_d == NULL )
+    {
+        rc = -ESRCH;
+        rcu_unlock_domain(source_d);
+        goto fail_early;
+    }
+
+    if ( dest_d->is_dying )
+    {
+        rc = -EINVAL;
+        goto fail;
+    }
+
+    if ( !source_d->is_dying )
+    {
+        /*
+         * Make sure no allocation/remapping for the source domain is ongoing
+         * and set the is_dying flag to prevent such actions in the future.
+         */
+        spin_lock(&source_d->page_alloc_lock);
+        source_d->is_dying = DOMDYING_locked;
+        spin_unlock(&source_d->page_alloc_lock);
+    }
+
+    last_gmfn = domain_get_maximum_gpfn(source_d);
+    gmfn = softr.gmfn_start;
+    while ( gmfn <= last_gmfn )
+    {
+        page = get_page_from_gfn(source_d, gmfn, &p2mt, 0);
+        if ( unlikely(page == NULL) )
+        {
+#ifdef CONFIG_X86
+            count = 0;
+            while ( p2m_is_pod(p2mt) )
+            {
+                count++;
+                if ( gmfn + count > last_gmfn )
+                    break;
+                page = get_page_from_gfn(source_d, gmfn + count, &p2mt, 0);
+                if ( page )
+                {
+                    put_page(page);
+                    page = NULL;
+                    break;
+                }
+            }
+
+            if ( !count )
+                gmfn++;
+
+            while ( count )
+            {
+                order = get_order_from_pages(count);
+                if ( (1ul << order) > count )
+                    order -= 1;
+                rc = guest_physmap_mark_populate_on_demand(dest_d, gmfn, order);
+                if ( rc )
+                    goto fail;
+                count -= 1ul << order;
+                gmfn += 1ul << order;
+            }
+#else
+            gmfn++;
+#endif
+            goto preempt_check;
+        }
+
+        mfn = page_to_mfn(page);
+        if ( unlikely(!mfn_valid(mfn)) )
+        {
+            put_page(page);
+            gmfn++;
+            goto preempt_check;
+        }
+
+        next_page = page;
+
+        /*
+         * A normal page is supposed to have count_info = 2 (1 from the domain
+         * and 1 from get_page_from_gfn() above). All other pages need to be
+         * copied.
+         */
+        count = 0;
+        while ( next_page && !is_xen_heap_page(next_page) &&
+                (next_page->count_info & PGC_count_mask) == 2 &&
+                page_to_mfn(next_page) == mfn + count )
+        {
+            count++;
+            drop_dom_ref = 0;
+            spin_lock(&source_d->page_alloc_lock);
+            page_set_owner(next_page, NULL);
+            page_list_del(next_page, &source_d->page_list);
+            source_d->tot_pages -= 1;
+            if ( unlikely(source_d->tot_pages == 0) )
+                drop_dom_ref = 1;
+            spin_unlock(&source_d->page_alloc_lock);
+            put_page(next_page);
+            if ( drop_dom_ref )
+                put_domain(source_d);
+
+            if ( unlikely(assign_pages(dest_d, next_page, 0, 0)) )
+            {
+                printk(XENLOG_G_INFO "Failed to assign Dom%d's MFN %lx"
+                       " to Dom%d\n", source_d->domain_id, mfn,
+                       dest_d->domain_id);
+                rc = -EFAULT;
+                goto fail;
+            }
+
+            if ( unlikely(gmfn + count > last_gmfn) )
+            {
+                next_page = NULL;
+                break;
+            }
+
+            next_page = get_page_from_gfn(source_d, gmfn + count, &p2mt, 0);
+        }
+
+        if ( next_page && count )
+            put_page(next_page);
+
+        if ( !count && (page->count_info & PGC_count_mask) != 2 )
+        {
+            new_page = alloc_domheap_page(dest_d, 0);
+            if ( unlikely(new_page == NULL) )
+            {
+                printk(XENLOG_G_INFO "Failed to alloc a page to replace"
+                       " Dom%d's GFN %lx (MFN %lx) for Dom %d\n",
+                       source_d->domain_id, gmfn, mfn, dest_d->domain_id);
+                rc = -ENOMEM;
+                put_page(page);
+                goto fail;
+            }
+            mfn_new = page_to_mfn(new_page);
+            copy_domain_page(mfn_new, mfn);
+            mfn = mfn_new;
+            put_page(page);
+            if ( guest_physmap_add_page(dest_d, gmfn, mfn, 0) )
+            {
+                printk(XENLOG_G_INFO "Failed to add new GFN %lx"
+                       " (MFN %lx) to Dom%d\n",
+                       gmfn, mfn, dest_d->domain_id);
+                rc = -EFAULT;
+                goto fail;
+            }
+            gmfn++;
+            softr.nr_transferred++;
+        }
+        else if ( !count )
+        {
+            put_page(page);
+            gmfn++;
+            goto preempt_check;
+        }
+
+        while ( count )
+        {
+            order = get_order_from_pages(count);
+            if ( (1ul << order) > count )
+                order -= 1;
+
+            guest_physmap_remove_page(source_d, gmfn, mfn, order);
+
+            if ( guest_physmap_add_page(dest_d, gmfn, mfn, order) )
+            {
+                printk(XENLOG_G_INFO "Failed to re-add Dom%d's GFN %lx"
+                       " (MFN %lx, order: %u) to Dom%d\n", source_d->domain_id,
+                       gmfn, mfn, order, dest_d->domain_id);
+                rc = -EFAULT;
+                goto fail;
+            }
+
+            softr.nr_transferred += 1ul << order;
+            count -= 1ul << order;
+            gmfn += 1ul << order;
+            mfn += 1ul << order;
+        }
+
+    preempt_check:
+        if ( hypercall_preempt_check() && gmfn <= last_gmfn )
+        {
+            softr.gmfn_start = gmfn;
+            rcu_unlock_domain(source_d);
+            rcu_unlock_domain(dest_d);
+            if ( __copy_field_to_guest(arg, &softr, gmfn_start) )
+                return -EFAULT;
+            if ( __copy_field_to_guest(arg, &softr, nr_transferred) )
+                return -EFAULT;
+            return hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "lh", XENMEM_soft_reset, arg);
+        }
+    }
+
+ fail:
+    rcu_unlock_domain(dest_d);
+    rcu_unlock_domain(source_d);
+ fail_early:
+    if ( __copy_field_to_guest(arg, &softr, nr_transferred) )
+        rc = -EFAULT;
+
+    return rc;
+}
+
 static int xenmem_add_to_physmap(struct domain *d,
                                  struct xen_add_to_physmap *xatp,
                                  unsigned int start)
@@ -828,6 +1056,10 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
             rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
         break;
 
+    case XENMEM_soft_reset:
+        rc = memory_soft_reset(guest_handle_cast(arg, xen_memory_soft_reset_t));
+        break;
+
     case XENMEM_maximum_ram_page:
         if ( unlikely(start_extent) )
             return -ENOSYS;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 832559a..8117341 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -573,7 +573,39 @@ struct xen_vnuma_topology_info {
 typedef struct xen_vnuma_topology_info xen_vnuma_topology_info_t;
 DEFINE_XEN_GUEST_HANDLE(xen_vnuma_topology_info_t);
 
-/* Next available subop number is 27 */
+/*
+ * Transfer all memory pages from one domain to the other. Pages are unmapped
+ * from the source domain and mapped at the same GFNs to the destination
+ * domain. This hypercall has a side-effect of moving the source domain to the
+ * 'dying' state.
+ *
+ * If a particular page is mapped more than once in the source domain, a new
+ * empty page is allocated for the destination domain and the content is
+ * copied over. The original page remains mapped to the source domain.
+ *
+ * The caller must be privileged and is expected to set gmfn_start to 0;
+ * this field is required for the hypercall continuation.
+ */
+
+#define XENMEM_soft_reset           27
+struct xen_memory_soft_reset {
+    /*
+     * [IN] Memory transfer details.
+     */
+    domid_t source_domid;       /* steal pages from */
+    domid_t dest_domid;         /* assign pages to */
+
+    xen_pfn_t gmfn_start;       /* start from gmfn */
+
+    /*
+     * [OUT] Number of transferred pages, including new allocations.
+     */
+    xen_ulong_t nr_transferred;
+};
+typedef struct xen_memory_soft_reset xen_memory_soft_reset_t;
+DEFINE_XEN_GUEST_HANDLE(xen_memory_soft_reset_t);
+
+/* Next available subop number is 28 */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
-- 
1.9.3
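
[Editor's note] For readers following along, here is a minimal sketch of how a
privileged caller might invoke the new subop. The xenmem_op() wrapper is
hypothetical, standing in for HYPERVISOR_memory_op or a privcmd-based
toolstack call, and the struct is re-declared locally so the snippet is
self-contained; the field semantics follow the public header above.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t domid_t;
typedef uint64_t xen_pfn_t;
typedef uint64_t xen_ulong_t;

#define XENMEM_soft_reset 27

struct xen_memory_soft_reset {
    domid_t source_domid;        /* steal pages from */
    domid_t dest_domid;          /* assign pages to */
    xen_pfn_t gmfn_start;        /* [IN] must be 0; continuation state */
    xen_ulong_t nr_transferred;  /* [OUT] pages moved, incl. fresh copies */
};

/* Assumed plumbing: stands in for HYPERVISOR_memory_op / a privcmd call. */
extern long xenmem_op(unsigned int cmd, void *arg);

static long soft_reset_memory(domid_t src, domid_t dst)
{
    struct xen_memory_soft_reset softr = {
        .source_domid = src,
        .dest_domid   = dst,
        .gmfn_start   = 0,   /* required: the continuation restarts here */
    };
    long rc = xenmem_op(XENMEM_soft_reset, &softr);

    /*
     * Preemption is handled inside the hypervisor via
     * hypercall_create_continuation(), so a single call suffices.
     */
    if ( rc == 0 )
        printf("transferred %llu pages\n",
               (unsigned long long)softr.nr_transferred);
    return rc;
}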
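[Editor's note] The two "while ( count )" loops in the patch share one idiom:
a run of count contiguous pages is consumed in the largest power-of-two-sized
chunks that the order-based physmap calls accept. Below is a standalone
illustration of that decomposition; get_order_from_pages() is re-implemented
here (matching Xen's round-up semantics) purely to keep the example
self-contained.

#include <stdio.h>

/* Smallest order such that (1 << order) >= nr_pages (rounds up). */
static unsigned int get_order_from_pages(unsigned long nr_pages)
{
    unsigned int order;

    nr_pages--;
    for ( order = 0; nr_pages; order++ )
        nr_pages >>= 1;
    return order;
}

int main(void)
{
    unsigned long gmfn = 0x1000, count = 5;   /* example run of 5 pages */

    while ( count )
    {
        unsigned int order = get_order_from_pages(count);

        /* get_order_from_pages() rounds up; step down to the largest
         * power of two that still fits in the remaining run. */
        if ( (1ul << order) > count )
            order -= 1;

        /* Here the hypervisor would remap 1 << order pages at once. */
        printf("map GFN %#lx, order %u (%lu page(s))\n",
               gmfn, order, 1ul << order);

        count -= 1ul << order;
        gmfn  += 1ul << order;
    }
    return 0;
}

For count = 5 this yields one order-2 chunk (4 pages) followed by an order-0
page, which is exactly how the PoD and remapping loops above walk a run.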