diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index f75011e..2417fe9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4598,93 +4598,112 @@ static int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap xatp)
     unsigned long prev_mfn, mfn = 0, gpfn;
     int rc;
 
-    switch ( xatp.space )
-    {
-    case XENMAPSPACE_shared_info:
-        if ( xatp.idx == 0 )
-            mfn = virt_to_mfn(d->shared_info);
-        break;
-    case XENMAPSPACE_grant_table:
-        spin_lock(&d->grant_table->lock);
+    if ( xatp.space != XENMAPSPACE_gmfn_range )
+        xatp.size = 1;
 
-        if ( d->grant_table->gt_version == 0 )
-            d->grant_table->gt_version = 1;
-
-        if ( d->grant_table->gt_version == 2 &&
-             (xatp.idx & XENMAPIDX_grant_table_status) )
+    while ( xatp.size > 0 )
+    {
+        if ( hypercall_preempt_check() )
         {
-            xatp.idx &= ~XENMAPIDX_grant_table_status;
-            if ( xatp.idx < nr_status_frames(d->grant_table) )
-                mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+            rc = -EAGAIN;
+            break;
         }
-        else
+
+        switch ( xatp.space )
         {
-            if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
-                 (xatp.idx < max_nr_grant_frames) )
-                gnttab_grow_table(d, xatp.idx + 1);
+        case XENMAPSPACE_shared_info:
+            if ( xatp.idx == 0 )
+                mfn = virt_to_mfn(d->shared_info);
+            break;
+        case XENMAPSPACE_grant_table:
+            spin_lock(&d->grant_table->lock);
 
-            if ( xatp.idx < nr_grant_frames(d->grant_table) )
-                mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
-        }
+            if ( d->grant_table->gt_version == 0 )
+                d->grant_table->gt_version = 1;
 
-        spin_unlock(&d->grant_table->lock);
-        break;
-    case XENMAPSPACE_gmfn:
-    {
-        p2m_type_t p2mt;
+            if ( d->grant_table->gt_version == 2 &&
+                 (xatp.idx & XENMAPIDX_grant_table_status) )
+            {
+                xatp.idx &= ~XENMAPIDX_grant_table_status;
+                if ( xatp.idx < nr_status_frames(d->grant_table) )
+                    mfn = virt_to_mfn(d->grant_table->status[xatp.idx]);
+            }
+            else
+            {
+                if ( (xatp.idx >= nr_grant_frames(d->grant_table)) &&
+                     (xatp.idx < max_nr_grant_frames) )
+                    gnttab_grow_table(d, xatp.idx + 1);
+
+                if ( xatp.idx < nr_grant_frames(d->grant_table) )
+                    mfn = virt_to_mfn(d->grant_table->shared_raw[xatp.idx]);
+            }
 
-        xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
-        /* If the page is still shared, exit early */
-        if ( p2m_is_shared(p2mt) )
+            spin_unlock(&d->grant_table->lock);
+            break;
+        case XENMAPSPACE_gmfn:
         {
-            rcu_unlock_domain(d);
-            return -ENOMEM;
+            p2m_type_t p2mt;
+
+            xatp.idx = mfn_x(gfn_to_mfn_unshare(d, xatp.idx, &p2mt));
+            /* If the page is still shared, exit early */
+            if ( p2m_is_shared(p2mt) )
+            {
+                rcu_unlock_domain(d);
+                return -ENOMEM;
+            }
+            if ( !get_page_from_pagenr(xatp.idx, d) )
+                break;
+            mfn = xatp.idx;
+            page = mfn_to_page(mfn);
+            break;
        }
-        if ( !get_page_from_pagenr(xatp.idx, d) )
+        default:
             break;
-        mfn = xatp.idx;
-        page = mfn_to_page(mfn);
-        break;
-    }
-    default:
-        break;
-    }
+        }
+
+        if ( !paging_mode_translate(d) || (mfn == 0) )
+        {
+            if ( page )
+                put_page(page);
+            rcu_unlock_domain(d);
+            return -EINVAL;
+        }
+
+        domain_lock(d);
 
-    if ( !paging_mode_translate(d) || (mfn == 0) )
-    {
         if ( page )
             put_page(page);
-        rcu_unlock_domain(d);
-        return -EINVAL;
-    }
 
-    domain_lock(d);
+        /* Remove previously mapped page if it was present. */
+        prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
+        if ( mfn_valid(prev_mfn) )
+        {
+            if ( is_xen_heap_mfn(prev_mfn) )
+                /* Xen heap frames are simply unhooked from this phys slot. */
+                guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
+            else
+                /* Normal domain memory is freed, to avoid leaking memory. */
+                guest_remove_page(d, xatp.gpfn);
+        }
 
-    if ( page )
-        put_page(page);
+        /* Unmap from old location, if any. */
+        gpfn = get_gpfn_from_mfn(mfn);
+        ASSERT( gpfn != SHARED_M2P_ENTRY );
+        if ( gpfn != INVALID_M2P_ENTRY )
+            guest_physmap_remove_page(d, gpfn, mfn, 0);
 
-    /* Remove previously mapped page if it was present. */
-    prev_mfn = gmfn_to_mfn(d, xatp.gpfn);
-    if ( mfn_valid(prev_mfn) )
-    {
-        if ( is_xen_heap_mfn(prev_mfn) )
-            /* Xen heap frames are simply unhooked from this phys slot. */
-            guest_physmap_remove_page(d, xatp.gpfn, prev_mfn, 0);
-        else
-            /* Normal domain memory is freed, to avoid leaking memory. */
-            guest_remove_page(d, xatp.gpfn);
-    }
+        /* Map at new location. */
+        rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
 
-    /* Unmap from old location, if any. */
-    gpfn = get_gpfn_from_mfn(mfn);
-    ASSERT( gpfn != SHARED_M2P_ENTRY );
-    if ( gpfn != INVALID_M2P_ENTRY )
-        guest_physmap_remove_page(d, gpfn, mfn, 0);
+        domain_unlock(d);
 
-    /* Map at new location. */
-    rc = guest_physmap_add_page(d, xatp.gpfn, mfn, 0);
+        xatp.idx++;
+        xatp.gpfn++;
+        xatp.size--;
 
-    domain_unlock(d);
+        if ( rc != 0 )
+            break;
+    }
 
     return rc;
 }
@@ -4714,10 +4733,15 @@ long arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
             return -EPERM;
         }
 
-        xenmem_add_to_physmap(d, xatp);
-
+        rc = xenmem_add_to_physmap(d, xatp);
         rcu_unlock_domain(d);
 
+        if ( rc == -EAGAIN )
+        {
+            rc = hypercall_create_continuation(
+                __HYPERVISOR_memory_op, "ih", op, arg);
+        }
+
         return rc;
     }
 
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 2c05099..05c3098 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -63,6 +63,10 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE(void) arg)
 
         XLAT_add_to_physmap(nat, &cmp);
         rc = arch_memory_op(op, guest_handle_from_ptr(nat, void));
 
+        XLAT_add_to_physmap(&cmp, nat);
+
+        if ( copy_to_guest(arg, &cmp, 1) )
+            return -EFAULT;
         break;
     }
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 08355e3..c5b78a8 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -208,10 +208,14 @@ struct xen_add_to_physmap {
     /* Which domain to change the mapping for. */
     domid_t domid;
 
+    /* Number of pages to go through for gmfn_range */
+    uint16_t size;
+
     /* Source mapping space. */
 #define XENMAPSPACE_shared_info 0 /* shared info page */
 #define XENMAPSPACE_grant_table 1 /* grant table page */
 #define XENMAPSPACE_gmfn        2 /* GMFN */
+#define XENMAPSPACE_gmfn_range  3 /* GMFN range */
     unsigned int space;
 
 #define XENMAPIDX_grant_table_status 0x80000000
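
For illustration only, and not part of the patch: a guest-side sketch of how the
new XENMAPSPACE_gmfn_range space might be exercised. The remap_gmfn_range()
helper is hypothetical, and the include paths and HYPERVISOR_memory_op() wrapper
follow Linux guest conventions and are assumptions; the struct fields and
constants come from the public header change above.

    /*
     * Sketch: batch-remap nr_pages contiguous frames, starting at
     * first_gmfn, to the pseudo-physical slots starting at first_gpfn.
     */
    #include <xen/interface/memory.h>   /* struct xen_add_to_physmap */
    #include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() */

    static int remap_gmfn_range(unsigned long first_gmfn,
                                unsigned long first_gpfn,
                                uint16_t nr_pages)
    {
        struct xen_add_to_physmap xatp = {
            .domid = DOMID_SELF,
            .size  = nr_pages,               /* only consulted for gmfn_range */
            .space = XENMAPSPACE_gmfn_range,
            .idx   = first_gmfn,             /* first source frame */
            .gpfn  = first_gpfn,             /* first destination slot */
        };

        /*
         * If the range is large, Xen preempts itself (-EAGAIN above) and
         * resumes via a hypercall continuation, so a single call suffices
         * from the guest's point of view.
         */
        return HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
    }

Before this patch, the same remap took one XENMAPSPACE_gmfn hypercall per page;
the range space moves the loop into the hypervisor, where it can be preempted
and continued safely.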