[Xen-devel] [PATCH RFC 1/2] x86/mem_access: Make the mem_access ops generic
This patch does the following:
1. Deprecate the HVMOP_[sg]et_mem_access HVM ops.
2. Move the ops under XENMEM_access_op.
3. Rename enums and structs to be more generic rather than HVM specific.
4. Change the preemption handling for XENMEM_access_op_set_access to use the
interface structure rather than an iterator encoded in the op value.
Signed-off-by: Aravindh Puthiyaparambil <aravindp@xxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
Cc: Tim Deegan <tim@xxxxxxx>
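For illustration only (not part of this patch): a minimal sketch of how a
toolstack might drive the new interface once HVMOP_[sg]et_mem_access is gone.
do_memory_op() is a stand-in for whatever XENMEM hypercall wrapper the caller
already uses (e.g. a libxc helper), and the domid/pfn/nr values are made up:

    /* Illustrative sketch -- do_memory_op() stands in for the caller's
     * existing XENMEM hypercall wrapper; values are arbitrary. */
    static int set_range_rx(domid_t domid, uint64_t first_pfn, uint32_t nr)
    {
        xen_mem_access_op_t mao = {
            .op            = XENMEM_access_op_set_access,
            .domid         = domid,
            .xenmem_access = XENMEM_access_rx,
            .pfn           = first_pfn,   /* ~0ull sets the domain default */
            .nr            = nr,          /* ignored when setting the default */
        };

        /* Replaces the deprecated HVMOP_set_mem_access path. */
        return do_memory_op(XENMEM_access_op, &mao);
    }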
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 38c491e..eeaa72e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4589,79 +4589,10 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
}
case HVMOP_set_mem_access:
- {
- struct xen_hvm_set_mem_access a;
- struct domain *d;
-
- if ( copy_from_guest(&a, arg, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(a.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto param_fail5;
-
- rc = xsm_hvm_param(XSM_TARGET, d, op);
- if ( rc )
- goto param_fail5;
-
- rc = -EINVAL;
- if ( (a.first_pfn != ~0ull) &&
- (a.nr < start_iter ||
- ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
- ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d))) )
- goto param_fail5;
-
- rc = p2m_set_mem_access(d, a.first_pfn, a.nr, start_iter,
- HVMOP_op_mask, a.hvmmem_access);
- if ( rc > 0 )
- {
- start_iter = rc;
- rc = -EAGAIN;
- }
-
- param_fail5:
- rcu_unlock_domain(d);
- break;
- }
-
case HVMOP_get_mem_access:
{
- struct xen_hvm_get_mem_access a;
- struct domain *d;
- hvmmem_access_t access;
-
- if ( copy_from_guest(&a, arg, 1) )
- return -EFAULT;
-
- rc = rcu_lock_remote_domain_by_id(a.domid, &d);
- if ( rc != 0 )
- return rc;
-
- rc = -EINVAL;
- if ( !is_hvm_domain(d) )
- goto param_fail6;
-
- rc = xsm_hvm_param(XSM_TARGET, d, op);
- if ( rc )
- goto param_fail6;
-
- rc = -EINVAL;
- if ( (a.pfn > domain_get_maximum_gpfn(d)) && a.pfn != ~0ull )
- goto param_fail6;
-
- rc = p2m_get_mem_access(d, a.pfn, &access);
- if ( rc != 0 )
- goto param_fail6;
-
- a.hvmmem_access = access;
- rc = __copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
-
- param_fail6:
- rcu_unlock_domain(d);
+ gdprintk(XENLOG_DEBUG, "Deprecated HVM op %ld.\n", op);
+ rc = -ENOSYS;
break;
}
diff --git a/xen/arch/x86/mm/mem_access.c b/xen/arch/x86/mm/mem_access.c
index 50aaf27..27415c1 100644
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -21,31 +21,93 @@
*/
+#include <xen/sched.h>
+#include <xen/guest_access.h>
#include <asm/p2m.h>
#include <asm/mem_event.h>
+#include <xsm/xsm.h>
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo)
+int mem_access_memop(XEN_GUEST_HANDLE_PARAM(void) arg)
{
int rc;
+ xen_mem_access_op_t mao;
+ struct domain *d;
+
+ if ( copy_from_guest(&mao, arg, 1) )
+ return -EFAULT;
+
+ rc = rcu_lock_live_remote_domain_by_id(mao.domid, &d);
+ if ( rc )
+ return rc;
+
+ if ( !is_hvm_domain(d) )
+ return -EINVAL;
+
+ rc = xsm_mem_event_op(XSM_TARGET, d, XENMEM_access_op);
+ if ( rc )
+ goto out;
if ( unlikely(!d->mem_event->access.ring_page) )
return -ENODEV;
- switch( meo->op )
+ switch ( mao.op )
{
case XENMEM_access_op_resume:
{
p2m_mem_access_resume(d);
rc = 0;
+ break;
+ }
+
+ case XENMEM_access_op_set_access:
+ {
+ rc = -EINVAL;
+
+ if ( (mao.pfn != ~0ull) &&
+ (((mao.pfn + mao.nr - 1) < mao.pfn) ||
+ ((mao.pfn + mao.nr - 1) > domain_get_maximum_gpfn(d))) )
+ break;
+
+ rc = p2m_set_mem_access(d, mao.pfn, mao.nr, mao.xenmem_access);
+ if ( rc > 0 )
+ {
+ mao.pfn += mao.nr - rc;
+ mao.nr = rc;
+ if ( __copy_to_guest(arg, &mao, 1) )
+ rc = -EFAULT;
+ else
+ rc = hypercall_create_continuation(__HYPERVISOR_memory_op, "lh",
+ XENMEM_access_op, arg);
+ }
+ break;
+ }
+
+ case XENMEM_access_op_get_access:
+ {
+ xenmem_access_t access;
+
+ rc = -EINVAL;
+ if ( (mao.pfn > domain_get_maximum_gpfn(d)) && mao.pfn != ~0ull )
+ break;
+
+ rc = p2m_get_mem_access(d, mao.pfn, &access);
+ if ( rc != 0 )
+ break;
+
+ mao.xenmem_access = access;
+ rc = __copy_to_guest(arg, &mao, 1) ? -EFAULT : 0;
+
+ break;
}
- break;
default:
rc = -ENOSYS;
break;
}
+ out:
+ rcu_unlock_domain(d);
return rc;
}
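A note on the continuation handling above (explanatory, not part of the
patch): on preemption p2m_set_mem_access() now returns the number of pages
still left to process, and the memop advances pfn by the number already done
(mao.nr - rc), shrinks nr to the remainder, copies the updated structure back
to the guest and restarts itself via hypercall_create_continuation(). For
example, a request for 1024 pages starting at pfn 0x1000 that is preempted
after 600 pages returns rc = 424, so the continuation re-enters with
pfn = 0x1258 and nr = 424; the old scheme instead encoded the restart index
in the HVMOP op value.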
diff --git a/xen/arch/x86/mm/mem_event.c b/xen/arch/x86/mm/mem_event.c
index d00e404..36b9dba 100644
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -458,9 +458,6 @@ int do_mem_event_op(int op, uint32_t domain, void *arg)
case XENMEM_paging_op:
ret = mem_paging_memop(d, (xen_mem_event_op_t *) arg);
break;
- case XENMEM_access_op:
- ret = mem_access_memop(d, (xen_mem_event_op_t *) arg);
- break;
case XENMEM_sharing_op:
ret = mem_sharing_memop(d, (xen_mem_sharing_op_t *) arg);
break;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index c38f334..d932f19 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1350,7 +1350,7 @@ void p2m_mem_access_resume(struct domain *d)
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
- uint32_t start, uint32_t mask, hvmmem_access_t access)
+ xenmem_access_t access)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_access_t a, _a;
@@ -1359,7 +1359,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
long rc = 0;
static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [HVMMEM_access_##ac] = p2m_access_##ac
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
ACCESS(n),
ACCESS(r),
ACCESS(w),
@@ -1378,7 +1378,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
case 0 ... ARRAY_SIZE(memaccess) - 1:
a = memaccess[access];
break;
- case HVMMEM_access_default:
+ case XENMEM_access_default:
a = p2m->default_access;
break;
default:
@@ -1393,7 +1393,7 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
}
p2m_lock(p2m);
- for ( pfn += start; nr > start; ++pfn )
+ for ( ; ; ++pfn )
{
mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
@@ -1403,9 +1403,9 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
}
/* Check for continuation if it's not the last iteration. */
- if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
+ if ( !--nr || hypercall_preempt_check() )
{
- rc = start;
+ rc = nr;
break;
}
}
@@ -1416,23 +1416,23 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
int p2m_get_mem_access(struct domain *d, unsigned long pfn,
- hvmmem_access_t *access)
+ xenmem_access_t *access)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_type_t t;
p2m_access_t a;
mfn_t mfn;
- static const hvmmem_access_t memaccess[] = {
- HVMMEM_access_n,
- HVMMEM_access_r,
- HVMMEM_access_w,
- HVMMEM_access_rw,
- HVMMEM_access_x,
- HVMMEM_access_rx,
- HVMMEM_access_wx,
- HVMMEM_access_rwx,
- HVMMEM_access_rx2rw
+ static const xenmem_access_t memaccess[] = {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ XENMEM_access_rx2rw
};
/* If request to get default access */
diff --git a/xen/arch/x86/x86_64/compat/mm.c b/xen/arch/x86/x86_64/compat/mm.c
index 0a8408b..77d593d 100644
--- a/xen/arch/x86/x86_64/compat/mm.c
+++ b/xen/arch/x86/x86_64/compat/mm.c
@@ -4,6 +4,7 @@
#include <compat/xen.h>
#include <asm/mem_event.h>
#include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
int compat_set_gdt(XEN_GUEST_HANDLE_PARAM(uint) frame_list, unsigned int entries)
{
@@ -185,7 +186,6 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
return mem_sharing_get_nr_shared_mfns();
case XENMEM_paging_op:
- case XENMEM_access_op:
{
xen_mem_event_op_t meo;
if ( copy_from_guest(&meo, arg, 1) )
@@ -195,6 +195,11 @@ int compat_arch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
return -EFAULT;
break;
}
+ case XENMEM_access_op:
+ {
+ rc = mem_access_memop(arg);
+ break;
+ }
case XENMEM_sharing_op:
{
xen_mem_sharing_op_t mso;
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index f6ea012..767bbfb 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -36,6 +36,7 @@
#include <asm/numa.h>
#include <asm/mem_event.h>
#include <asm/mem_sharing.h>
+#include <asm/mem_access.h>
#include <public/memory.h>
/* Parameters for PFN/MADDR compression. */
@@ -1007,7 +1008,6 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
return mem_sharing_get_nr_shared_mfns();
case XENMEM_paging_op:
- case XENMEM_access_op:
{
xen_mem_event_op_t meo;
if ( copy_from_guest(&meo, arg, 1) )
@@ -1017,6 +1017,11 @@ long subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void) arg)
return -EFAULT;
break;
}
+ case XENMEM_access_op:
+ {
+ rc = mem_access_memop(arg);
+ break;
+ }
case XENMEM_sharing_op:
{
xen_mem_sharing_op_t mso;
diff --git a/xen/include/asm-x86/mem_access.h b/xen/include/asm-x86/mem_access.h
index 60c2834..1138ccf 100644
--- a/xen/include/asm-x86/mem_access.h
+++ b/xen/include/asm-x86/mem_access.h
@@ -23,7 +23,7 @@
#ifndef _XEN_ASM_MEM_ACCESS_H
#define _XEN_ASM_MEM_ACCESS_H
-int mem_access_memop(struct domain *d, xen_mem_event_op_t *meo);
+int mem_access_memop(XEN_GUEST_HANDLE_PARAM(void) arg);
int mem_access_send_req(struct domain *d, mem_event_request_t *req);
#endif /* _XEN_ASM_MEM_ACCESS_H */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index d644f82..3f8e00d 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -577,12 +577,12 @@ void p2m_mem_access_resume(struct domain *d);
/* Set access type for a region of pfns.
* If start_pfn == -1ul, sets the default access type */
long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
- uint32_t start, uint32_t mask, hvmmem_access_t access);
+ xenmem_access_t access);
/* Get access type for a pfn
* If pfn == -1ul, gets the default access type */
-int p2m_get_mem_access(struct domain *d, unsigned long pfn,
- hvmmem_access_t *access);
+int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+ xenmem_access_t *access);
/*
* Internal functions, only called by other p2m code
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index 3204ec4..fa9ac17 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -23,6 +23,7 @@
#include "../xen.h"
#include "../trace.h"
+#include "../memory.h"
/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
#define HVMOP_set_param 0
@@ -162,36 +163,31 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_xentrace_t);
/* Following tools-only interfaces may change in future. */
#if defined(__XEN__) || defined(__XEN_TOOLS__)
+/* Deprecated by XENMEM_access_op_set_access */
#define HVMOP_set_mem_access 12
typedef enum {
- HVMMEM_access_n,
- HVMMEM_access_r,
- HVMMEM_access_w,
- HVMMEM_access_rw,
- HVMMEM_access_x,
- HVMMEM_access_rx,
- HVMMEM_access_wx,
- HVMMEM_access_rwx,
- HVMMEM_access_rx2rw, /* Page starts off as r-x, but automatically
- * change to r-w on a write */
- HVMMEM_access_n2rwx, /* Log access: starts off as n, automatically
- * goes to rwx, generating an event without
- * pausing the vcpu */
- HVMMEM_access_default /* Take the domain default */
+ HVMMEM_access_n = XENMEM_access_n,
+ HVMMEM_access_r = XENMEM_access_r,
+ HVMMEM_access_w = XENMEM_access_w,
+ HVMMEM_access_rw = XENMEM_access_rw,
+ HVMMEM_access_x = XENMEM_access_x,
+ HVMMEM_access_rx = XENMEM_access_rx,
+ HVMMEM_access_wx = XENMEM_access_wx,
+ HVMMEM_access_rwx = XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * change to r-w on a write
+ */
+ HVMMEM_access_rx2rw = XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ HVMMEM_access_n2rwx = XENMEM_access_n2rwx,
+ /* Take the domain default */
+ HVMMEM_access_default = XENMEM_access_default
} hvmmem_access_t;
-/* Notify that a region of memory is to have specific access types */
-struct xen_hvm_set_mem_access {
- /* Domain to be updated. */
- domid_t domid;
- /* Memory type */
- uint16_t hvmmem_access; /* hvm_access_t */
- /* Number of pages, ignored on setting default access */
- uint32_t nr;
- /* First pfn, or ~0ull to set the default access for new pages */
- uint64_aligned_t first_pfn;
-};
-typedef struct xen_hvm_set_mem_access xen_hvm_set_mem_access_t;
-DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_t);
#define HVMOP_get_mem_access 13
/* Get the specific access type for that region of memory */
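Because the deprecated HVMMEM_access_* enumerators are now defined as aliases
of their XENMEM_access_* counterparts, existing tools that still pass the HVM
values stay value-compatible with the new memop. If we wanted to enforce that
at build time, a check along these lines (illustrative only, not part of the
patch) would do:

    /* Illustrative: keep the legacy and generic enumerations in sync.
     * Would live in any hypervisor function that sees both headers. */
    BUILD_BUG_ON(HVMMEM_access_rwx     != XENMEM_access_rwx);
    BUILD_BUG_ON(HVMMEM_access_default != XENMEM_access_default);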
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index f19ac14..129d841 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -363,9 +363,6 @@ typedef struct xen_pod_target xen_pod_target_t;
#define XENMEM_paging_op_evict 1
#define XENMEM_paging_op_prep 2
-#define XENMEM_access_op 21
-#define XENMEM_access_op_resume 0
-
struct xen_mem_event_op {
uint8_t op; /* XENMEM_*_op_* */
domid_t domain;
@@ -379,6 +376,56 @@ struct xen_mem_event_op {
typedef struct xen_mem_event_op xen_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
+#define XENMEM_access_op 21
+#define XENMEM_access_op_resume 0
+#define XENMEM_access_op_set_access 1
+#define XENMEM_access_op_get_access 2
+
+typedef enum {
+ XENMEM_access_n,
+ XENMEM_access_r,
+ XENMEM_access_w,
+ XENMEM_access_rw,
+ XENMEM_access_x,
+ XENMEM_access_rx,
+ XENMEM_access_wx,
+ XENMEM_access_rwx,
+ /*
+ * Page starts off as r-x, but automatically
+ * change to r-w on a write
+ */
+ XENMEM_access_rx2rw,
+ /*
+ * Log access: starts off as n, automatically
+ * goes to rwx, generating an event without
+ * pausing the vcpu
+ */
+ XENMEM_access_n2rwx,
+ /* Take the domain default */
+ XENMEM_access_default
+} xenmem_access_t;
+
+struct xen_mem_access_op {
+ /* XENMEM_access_op_* */
+ uint8_t op;
+ domid_t domid;
+ /* xenmem_access_t */
+ uint16_t xenmem_access;
+ /*
+ * Number of pages for set op
+ * Ignored on setting default access and other ops
+ */
+ uint32_t nr;
+ /*
+ * First pfn for set op
+ * pfn for get op
+ * ~0ull is used to set and get the default access for pages
+ */
+ uint64_aligned_t pfn;
+};
+typedef struct xen_mem_access_op xen_mem_access_op_t;
+DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
+
#define XENMEM_sharing_op 22
#define XENMEM_sharing_op_nominate_gfn 0
#define XENMEM_sharing_op_nominate_gref 1
--
1.8.3.2