[Xen-devel] [PATCH V3] tools/libxc, xen/x86: Added xc_set_mem_access_multi()
Currently it is only possible to set mem_access restrictions for a
contiguous range of GFNs (or, as a particular case, for a single GFN).
This patch introduces a new libxc function that takes an array of GFNs.
The alternative would be to set each page in turn, requiring a
userspace-HV round trip for each call and triggering a TLB flush per
page set.

Signed-off-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
---
Changes since V2:
 - Renamed 'size' back to 'nr', but added a comment explaining the
   significance of the parameter.
 - Reverted modifications to p2m_set_altp2m_mem_access().
 - Renamed _p2m_set_mem_access() to set_mem_access(), and removed the
   inline hint.
 - Unified return paths for set_mem_access().
 - Modified p2m_xenmem_access_to_p2m_access() to return bool_t.
 - Removed stray blank line between declarations.
 - Replaced the return mem_access_memop() in the XENMEM_access_op case
   in common/compat/memory.c with a proper break.
 - Now using XEN_GUEST_HANDLE(const_uint64) and
   XEN_GUEST_HANDLE(const_uint8).
 - Added p2m_set_mem_access_multi() to the ARM side to fix a
   compilation error.
---
 tools/libxc/include/xenctrl.h |   9 +++
 tools/libxc/xc_mem_access.c   |  38 +++++++++++
 xen/arch/arm/p2m.c            |   9 +++
 xen/arch/x86/mm/p2m.c         | 149 ++++++++++++++++++++++++++++++++----------
 xen/common/compat/memory.c    |  25 +++++--
 xen/common/mem_access.c       |  11 ++++
 xen/include/public/memory.h   |  14 +++-
 xen/include/xen/p2m-common.h  |   6 ++
 xen/include/xlat.lst          |   2 +-
 9 files changed, 224 insertions(+), 39 deletions(-)

diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 560ce7b..5e685a6 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -2126,6 +2126,15 @@ int xc_set_mem_access(xc_interface *xch, domid_t domain_id,
                       uint32_t nr);
 
 /*
+ * Set an array of pages to their respective access in the access array.
+ * The nr parameter specifies the size of the pages and access arrays.
+ * The same allowed access types as for xc_set_mem_access() apply.
+ */
+int xc_set_mem_access_multi(xc_interface *xch, domid_t domain_id,
+                            uint8_t *access, uint64_t *pages,
+                            uint32_t nr);
+
+/*
  * Gets the mem access for the given page (returned in access on success)
  */
 int xc_get_mem_access(xc_interface *xch, domid_t domain_id,
diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index eee088c..9536635 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -41,6 +41,44 @@ int xc_set_mem_access(xc_interface *xch,
     return do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
 }
 
+int xc_set_mem_access_multi(xc_interface *xch,
+                            domid_t domain_id,
+                            uint8_t *access,
+                            uint64_t *pages,
+                            uint32_t nr)
+{
+    DECLARE_HYPERCALL_BOUNCE(access, nr, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    int rc;
+
+    xen_mem_access_op_t mao =
+    {
+        .op     = XENMEM_access_op_set_access_multi,
+        .domid  = domain_id,
+        .access = XENMEM_access_default + 1, /* Invalid value */
+        .pfn    = ~0UL, /* Invalid GFN */
+        .nr     = nr,
+    };
+
+    if ( xc_hypercall_bounce_pre(xch, pages) ||
+         xc_hypercall_bounce_pre(xch, access) )
+    {
+        PERROR("Could not bounce memory for XENMEM_access_op_set_access_multi");
+        return -1;
+    }
+
+    set_xen_guest_handle(mao.pfn_list, pages);
+    set_xen_guest_handle(mao.access_list, access);
+
+    rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));
+
+    xc_hypercall_bounce_post(xch, access);
+    xc_hypercall_bounce_post(xch, pages);
+
+    return rc;
+}
+
 int xc_get_mem_access(xc_interface *xch,
                       domid_t domain_id,
                       uint64_t pfn,
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index b648a9d..e65a9b8 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1836,6 +1836,15 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
     return 0;
 }
 
+long p2m_set_mem_access_multi(struct domain *d,
+                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+                              const XEN_GUEST_HANDLE(const_uint8) access_list,
+                              uint32_t nr, uint32_t start, uint32_t mask,
+                              unsigned int altp2m_idx)
+{
+    return -ENOTSUP;
+}
+
 int p2m_get_mem_access(struct domain *d, gfn_t gfn,
                        xenmem_access_t *access)
 {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 27f9d26..980661a 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -28,6 +28,7 @@
 #include <xen/event.h>
 #include <public/vm_event.h>
 #include <asm/domain.h>
+#include <xen/guest_access.h> /* copy_from_guest() */
 #include <asm/page.h>
 #include <asm/paging.h>
 #include <asm/p2m.h>
@@ -1793,21 +1794,36 @@ int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                     (current->domain != d));
 }
 
-/*
- * Set access type for a region of gfns.
- * If gfn == INVALID_GFN, sets the default access type.
- */
-long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
-                        uint32_t start, uint32_t mask, xenmem_access_t access,
-                        unsigned int altp2m_idx)
+static int set_mem_access(struct domain *d, struct p2m_domain *p2m,
+                          struct p2m_domain *ap2m, p2m_access_t a,
+                          unsigned long gfn_l)
 {
-    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
-    p2m_access_t a, _a;
-    p2m_type_t t;
-    mfn_t mfn;
-    unsigned long gfn_l;
-    long rc = 0;
+    int rc = 0;
 
+    if ( ap2m )
+    {
+        rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+        /* If the corresponding mfn is invalid we will want to just skip it */
+        if ( rc == -ESRCH )
+            rc = 0;
+    }
+    else
+    {
+        mfn_t mfn;
+        p2m_access_t _a;
+        p2m_type_t t;
+
+        mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
+        rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
+    }
+
+    return rc;
+}
+
+static bool_t p2m_xenmem_access_to_p2m_access(struct p2m_domain *p2m,
+                                              xenmem_access_t xaccess,
+                                              p2m_access_t *paccess)
+{
     static const p2m_access_t memaccess[] = {
 #define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
         ACCESS(n),
@@ -1823,6 +1839,34 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 #undef ACCESS
     };
 
+    switch ( xaccess )
+    {
+    case 0 ... ARRAY_SIZE(memaccess) - 1:
+        *paccess = memaccess[xaccess];
+        break;
+    case XENMEM_access_default:
+        *paccess = p2m->default_access;
+        break;
+    default:
+        return 0;
+    }
+
+    return 1;
+}
+
+/*
+ * Set access type for a region of gfns.
+ * If gfn == INVALID_GFN, sets the default access type.
+ */
+long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, xenmem_access_t access,
+                        unsigned int altp2m_idx)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+    p2m_access_t a;
+    unsigned long gfn_l;
+    long rc = 0;
+
     /* altp2m view 0 is treated as the hostp2m */
     if ( altp2m_idx )
     {
@@ -1833,17 +1877,8 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
         ap2m = d->arch.altp2m_p2m[altp2m_idx];
     }
 
-    switch ( access )
-    {
-    case 0 ... ARRAY_SIZE(memaccess) - 1:
-        a = memaccess[access];
-        break;
-    case XENMEM_access_default:
-        a = p2m->default_access;
-        break;
-    default:
+    if ( !p2m_xenmem_access_to_p2m_access(p2m, access, &a) )
         return -EINVAL;
-    }
 
     /* If request to set default access. */
     if ( gfn_eq(gfn, INVALID_GFN) )
@@ -1858,21 +1893,69 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
 
     for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
     {
-        if ( ap2m )
+        rc = set_mem_access(d, p2m, ap2m, a, gfn_l);
+
+        if ( rc )
+            break;
+
+        /* Check for continuation if it's not the last iteration. */
+        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
         {
-            rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
-            /* If the corresponding mfn is invalid we will just skip it */
-            if ( rc && rc != -ESRCH )
-                break;
+            rc = start;
+            break;
         }
-        else
+    }
+
+    if ( ap2m )
+        p2m_unlock(ap2m);
+    p2m_unlock(p2m);
+
+    return rc;
+}
+
+long p2m_set_mem_access_multi(struct domain *d,
+                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+                              const XEN_GUEST_HANDLE(const_uint8) access_list,
+                              uint32_t nr, uint32_t start, uint32_t mask,
+                              unsigned int altp2m_idx)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
+    long rc = 0;
+
+    /* altp2m view 0 is treated as the hostp2m */
+    if ( altp2m_idx )
+    {
+        if ( altp2m_idx >= MAX_ALTP2M ||
+             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
+            return -EINVAL;
+
+        ap2m = d->arch.altp2m_p2m[altp2m_idx];
+    }
+
+    p2m_lock(p2m);
+    if ( ap2m )
+        p2m_lock(ap2m);
+
+    while ( start < nr )
+    {
+        p2m_access_t a;
+        uint8_t access;
+        uint64_t gfn_l;
+
+        copy_from_guest_offset(&gfn_l, pfn_list, start, 1);
+        copy_from_guest_offset(&access, access_list, start, 1);
+
+        if ( !p2m_xenmem_access_to_p2m_access(p2m, access, &a) )
         {
-            mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
-            rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
-            if ( rc )
-                break;
+            rc = -EINVAL;
+            break;
         }
 
+        rc = set_mem_access(d, p2m, ap2m, a, gfn_l);
+
+        if ( rc )
+            break;
+
         /* Check for continuation if it's not the last iteration. */
         if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
         {
diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
index 579040e..1b45b31 100644
--- a/xen/common/compat/memory.c
+++ b/xen/common/compat/memory.c
@@ -15,7 +15,6 @@ CHECK_TYPE(domid);
 #undef compat_domid_t
 #undef xen_domid_t
 
-CHECK_mem_access_op;
 CHECK_vmemrange;
 
 #ifdef CONFIG_HAS_PASSTHROUGH
@@ -71,6 +70,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
         struct xen_add_to_physmap_batch *atpb;
         struct xen_remove_from_physmap *xrfp;
         struct xen_vnuma_topology_info *vnuma;
+        struct xen_mem_access_op *mao;
     } nat;
     union {
         struct compat_memory_reservation rsrv;
@@ -78,6 +78,7 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
         struct compat_add_to_physmap atp;
         struct compat_add_to_physmap_batch atpb;
         struct compat_vnuma_topology_info vnuma;
+        struct compat_mem_access_op mao;
     } cmp;
 
     set_xen_guest_handle(nat.hnd, COMPAT_ARG_XLAT_VIRT_BASE);
@@ -321,9 +322,22 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
         }
 
         case XENMEM_access_op:
-            return mem_access_memop(cmd,
-                                    guest_handle_cast(compat,
-                                                      xen_mem_access_op_t));
+        {
+            if ( copy_from_guest(&cmp.mao, compat, 1) )
+                return -EFAULT;
+
+#define XLAT_mem_access_op_HNDL_pfn_list(_d_, _s_) \
+            guest_from_compat_handle((_d_)->pfn_list, (_s_)->pfn_list)
+#define XLAT_mem_access_op_HNDL_access_list(_d_, _s_) \
+            guest_from_compat_handle((_d_)->access_list, (_s_)->access_list)
+
+            XLAT_mem_access_op(nat.mao, &cmp.mao);
+
+#undef XLAT_mem_access_op_HNDL_pfn_list
+#undef XLAT_mem_access_op_HNDL_access_list
+
+            break;
+        }
 
         case XENMEM_get_vnumainfo:
         {
@@ -520,6 +534,9 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) compat)
                 rc = -EFAULT;
             break;
 
+        case XENMEM_access_op:
+            break;
+
         default:
             domain_crash(current->domain);
             split = 0;
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 82f4bad..565a320 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -76,6 +76,17 @@ int mem_access_memop(unsigned long cmd,
         }
         break;
 
+    case XENMEM_access_op_set_access_multi:
+        rc = p2m_set_mem_access_multi(d, mao.pfn_list, mao.access_list, mao.nr,
+                                      start_iter, MEMOP_CMD_MASK, 0);
+        if ( rc > 0 )
+        {
+            ASSERT(!(rc & MEMOP_CMD_MASK));
+            rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+                                               XENMEM_access_op | rc, arg);
+        }
+        break;
+
     case XENMEM_access_op_get_access:
     {
         xenmem_access_t access;
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 3badfb9..a5547a9 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -410,6 +410,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_paging_op_t);
  * #define XENMEM_access_op_enable_emulate  2
  * #define XENMEM_access_op_disable_emulate 3
  */
+#define XENMEM_access_op_set_access_multi   4
 
 typedef enum {
     XENMEM_access_n,
@@ -442,7 +443,8 @@ struct xen_mem_access_op {
     uint8_t access;
     domid_t domid;
     /*
-     * Number of pages for set op
+     * Number of pages for set op (or size of pfn_list for
+     * XENMEM_access_op_set_access_multi)
      * Ignored on setting default access and other ops
      */
     uint32_t nr;
@@ -452,6 +454,16 @@ struct xen_mem_access_op {
      * ~0ull is used to set and get the default access for pages
      */
     uint64_aligned_t pfn;
+    /*
+     * List of pfns to set access for
+     * Used only with XENMEM_access_op_set_access_multi
+     */
+    XEN_GUEST_HANDLE(const_uint64) pfn_list;
+    /*
+     * Corresponding list of access settings for pfn_list
+     * Used only with XENMEM_access_op_set_access_multi
+     */
+    XEN_GUEST_HANDLE(const_uint8) access_list;
 };
 typedef struct xen_mem_access_op xen_mem_access_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index b4f9077..3be1e91 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -53,6 +53,12 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                         uint32_t start, uint32_t mask, xenmem_access_t access,
                         unsigned int altp2m_idx);
 
+long p2m_set_mem_access_multi(struct domain *d,
+                              const XEN_GUEST_HANDLE(const_uint64) pfn_list,
+                              const XEN_GUEST_HANDLE(const_uint8) access_list,
+                              uint32_t nr, uint32_t start, uint32_t mask,
+                              unsigned int altp2m_idx);
+
 /*
  * Get access type for a gfn.
  * If gfn == INVALID_GFN, gets the default access type.
diff --git a/xen/include/xlat.lst b/xen/include/xlat.lst
index 801a1c1..bdf1d05 100644
--- a/xen/include/xlat.lst
+++ b/xen/include/xlat.lst
@@ -68,7 +68,7 @@
 !	memory_exchange			memory.h
 !	memory_map			memory.h
 !	memory_reservation		memory.h
-?	mem_access_op			memory.h
+!	mem_access_op			memory.h
 !	pod_target			memory.h
 !	remove_from_physmap		memory.h
 !	reserved_device_memory_map	memory.h
-- 
1.9.1
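For context, a minimal usage sketch of the new call follows. This is not part
of the patch: it assumes the patch above is applied and that the libxc headers
are available, and the domain ID handling, GFN values and access types below
are arbitrary placeholders chosen for illustration only.

/*
 * Sketch: restrict several non-contiguous guest frames of a domain with a
 * single xc_set_mem_access_multi() call instead of one hypercall per page.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xenctrl.h>

int main(int argc, char *argv[])
{
    xc_interface *xch;
    domid_t domid;
    /* Three scattered guest frames; the GFN values are placeholders. */
    uint64_t pages[]  = { 0x1000, 0x2000, 0x5000 };
    /* One access setting per entry in pages[]. */
    uint8_t  access[] = { XENMEM_access_r, XENMEM_access_rx, XENMEM_access_n };
    int rc;

    if ( argc != 2 )
    {
        fprintf(stderr, "usage: %s <domid>\n", argv[0]);
        return 1;
    }
    domid = atoi(argv[1]);

    xch = xc_interface_open(NULL, NULL, 0);
    if ( !xch )
    {
        perror("xc_interface_open");
        return 1;
    }

    /* One round trip to the hypervisor covers the whole batch. */
    rc = xc_set_mem_access_multi(xch, domid, access, pages, 3);
    if ( rc )
        perror("xc_set_mem_access_multi");

    xc_interface_close(xch);
    return rc ? 1 : 0;
}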