[Xen-changelog] [xen master] x86/altp2m: merge p2m_set_altp2m_mem_access and p2m_set_mem_access
commit dc8080fe7fba71b8c44c230ca0d0d1ae4467e690
Author: Tamas K Lengyel <tlengyel@xxxxxxxxxxx>
AuthorDate: Wed Feb 10 14:46:09 2016 +0100
Commit: Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Wed Feb 10 14:46:09 2016 +0100
x86/altp2m: merge p2m_set_altp2m_mem_access and p2m_set_mem_access
The altp2m subsystem in its current form duplicates much of the existing
code present in p2m for setting mem_access permissions. In this patch we
consolidate the two versions but keep the separate MEMOP and HVMOP
interfaces.
Signed-off-by: Tamas K Lengyel <tlengyel@xxxxxxxxxxx>
Acked-by: Wei Liu <wei.liu2@xxxxxxxxxx>
Acked-by: Razvan Cojocaru <rcojocaru@xxxxxxxxxxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
---
xen/arch/arm/p2m.c | 9 +--
xen/arch/x86/hvm/hvm.c | 6 +-
xen/arch/x86/mm/p2m.c | 168 +++++++++++++++++++------------------------
xen/common/mem_access.c | 2 +-
xen/include/asm-x86/p2m.h | 4 --
xen/include/xen/p2m-common.h | 3 +-
6 files changed, 86 insertions(+), 106 deletions(-)
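
As a rough illustration of the consolidated interface (a sketch, not part of the patch): after this commit a single p2m_set_mem_access() call serves both the host p2m and altp2m views, selected by the new trailing altp2m_idx argument, where index 0 is treated as the hostp2m. The snippet below assumes a valid struct domain *d, a gfn value and an altp2m view index already held by the caller; the names 'gfn' and 'view' are illustrative and do not appear in the patch.

/*
 * Illustrative sketch of callers of the merged p2m_set_mem_access().
 * Types and the prototype come from the Xen-internal headers touched
 * by this patch (xen/include/xen/p2m-common.h).
 */
long rc;

/* Host p2m: an altp2m_idx of 0 is treated as the hostp2m. */
rc = p2m_set_mem_access(d, _gfn(gfn), 1 /* nr */, 0 /* start */,
                        0 /* mask */, XENMEM_access_rx, 0);

/* altp2m view: the index the HVMOP_altp2m_set_mem_access path used to take. */
rc = p2m_set_mem_access(d, _gfn(gfn), 1, 0, 0, XENMEM_access_rx, view);

This mirrors the updated HVMOP handler in xen/arch/x86/hvm/hvm.c below, which passes nr = 1, start = 0 and mask = 0 together with the requested view.
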
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index e8e6db4..a2a9c4b 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -1711,13 +1711,13 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
if ( npfec.write_access && xma == XENMEM_access_rx2rw )
{
rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
- 0, ~0, XENMEM_access_rw);
+ 0, ~0, XENMEM_access_rw, 0);
return false;
}
else if ( xma == XENMEM_access_n2rwx )
{
rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
- 0, ~0, XENMEM_access_rwx);
+ 0, ~0, XENMEM_access_rwx, 0);
}
/* Otherwise, check if there is a vm_event monitor subscriber */
@@ -1739,7 +1739,7 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
/* A listener is not required, so clear the access
* restrictions. */
rc = p2m_set_mem_access(v->domain, _gfn(paddr_to_pfn(gpa)), 1,
- 0, ~0, XENMEM_access_rwx);
+ 0, ~0, XENMEM_access_rwx, 0);
}
}
@@ -1790,7 +1790,8 @@ bool_t p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
* If gfn == INVALID_GFN, sets the default access type.
*/
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
- uint32_t start, uint32_t mask, xenmem_access_t access)
+ uint32_t start, uint32_t mask, xenmem_access_t access,
+ unsigned int altp2m_idx)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
p2m_access_t a;
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 930d0e3..fe382ce 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -6460,9 +6460,9 @@ static int do_altp2m_op(
if ( a.u.set_mem_access.pad )
rc = -EINVAL;
else
- rc = p2m_set_altp2m_mem_access(d, a.u.set_mem_access.view,
- _gfn(a.u.set_mem_access.gfn),
- a.u.set_mem_access.hvmmem_access);
+ rc = p2m_set_mem_access(d, _gfn(a.u.set_mem_access.gfn), 1, 0, 0,
+ a.u.set_mem_access.hvmmem_access,
+ a.u.set_mem_access.view);
break;
case HVMOP_altp2m_change_gfn:
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 47e7fad..1b43853 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1743,14 +1743,56 @@ bool_t p2m_mem_access_check(paddr_t gpa, unsigned long gla,
return (p2ma == p2m_access_n2rwx);
}
+static inline
+int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
+ struct p2m_domain *ap2m, p2m_access_t a,
+ gfn_t gfn)
+{
+ mfn_t mfn;
+ p2m_type_t t;
+ p2m_access_t old_a;
+ unsigned int page_order;
+ unsigned long gfn_l = gfn_x(gfn);
+ int rc;
+
+ mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);
+
+ /* Check host p2m if no valid entry in alternate */
+ if ( !mfn_valid(mfn) )
+ {
+ mfn = hp2m->get_entry(hp2m, gfn_l, &t, &old_a,
+ P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
+
+ rc = -ESRCH;
+ if ( !mfn_valid(mfn) || t != p2m_ram_rw )
+ return rc;
+
+ /* If this is a superpage, copy that first */
+ if ( page_order != PAGE_ORDER_4K )
+ {
+ unsigned long mask = ~((1UL << page_order) - 1);
+ unsigned long gfn2_l = gfn_l & mask;
+ mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);
+
+ rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
+ if ( rc )
+ return rc;
+ }
+ }
+
+ return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
+ (current->domain != d));
+}
+
/*
* Set access type for a region of gfns.
* If gfn == INVALID_GFN, sets the default access type.
*/
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
- uint32_t start, uint32_t mask, xenmem_access_t access)
+ uint32_t start, uint32_t mask, xenmem_access_t access,
+ unsigned int altp2m_idx)
{
- struct p2m_domain *p2m = p2m_get_hostp2m(d);
+ struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
p2m_access_t a, _a;
p2m_type_t t;
mfn_t mfn;
@@ -1772,6 +1814,16 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
#undef ACCESS
};
+ /* altp2m view 0 is treated as the hostp2m */
+ if ( altp2m_idx )
+ {
+ if ( altp2m_idx >= MAX_ALTP2M ||
+ d->arch.altp2m_eptp[altp2m_idx] == INVALID_MFN )
+ return -EINVAL;
+
+ ap2m = d->arch.altp2m_p2m[altp2m_idx];
+ }
+
switch ( access )
{
case 0 ... ARRAY_SIZE(memaccess) - 1:
@@ -1792,12 +1844,25 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
}
p2m_lock(p2m);
+ if ( ap2m )
+ p2m_lock(ap2m);
+
for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
{
- mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
- rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
- if ( rc )
- break;
+ if ( ap2m )
+ {
+ rc = p2m_set_altp2m_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));
+ /* If the corresponding mfn is invalid we will just skip it */
+ if ( rc && rc != -ESRCH )
+ break;
+ }
+ else
+ {
+ mfn = p2m->get_entry(p2m, gfn_l, &t, &_a, 0, NULL, NULL);
+ rc = p2m->set_entry(p2m, gfn_l, mfn, PAGE_ORDER_4K, t, a, -1);
+ if ( rc )
+ break;
+ }
/* Check for continuation if it's not the last iteration. */
if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
@@ -1806,7 +1871,11 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
break;
}
}
+
+ if ( ap2m )
+ p2m_unlock(ap2m);
p2m_unlock(p2m);
+
return rc;
}
@@ -2361,93 +2430,6 @@ int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx)
return rc;
}
-int p2m_set_altp2m_mem_access(struct domain *d, unsigned int idx,
- gfn_t gfn, xenmem_access_t access)
-{
- struct p2m_domain *hp2m, *ap2m;
- p2m_access_t req_a, old_a;
- p2m_type_t t;
- mfn_t mfn;
- unsigned int page_order;
- int rc = -EINVAL;
-
- static const p2m_access_t memaccess[] = {
-#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
- ACCESS(n),
- ACCESS(r),
- ACCESS(w),
- ACCESS(rw),
- ACCESS(x),
- ACCESS(rx),
- ACCESS(wx),
- ACCESS(rwx),
-#undef ACCESS
- };
-
- if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == INVALID_MFN )
- return rc;
-
- ap2m = d->arch.altp2m_p2m[idx];
-
- switch ( access )
- {
- case 0 ... ARRAY_SIZE(memaccess) - 1:
- req_a = memaccess[access];
- break;
- case XENMEM_access_default:
- req_a = ap2m->default_access;
- break;
- default:
- return rc;
- }
-
- /* If request to set default access */
- if ( gfn_x(gfn) == INVALID_GFN )
- {
- ap2m->default_access = req_a;
- return 0;
- }
-
- hp2m = p2m_get_hostp2m(d);
-
- p2m_lock(ap2m);
-
- mfn = ap2m->get_entry(ap2m, gfn_x(gfn), &t, &old_a, 0, NULL, NULL);
-
- /* Check host p2m if no valid entry in alternate */
- if ( !mfn_valid(mfn) )
- {
- mfn = hp2m->get_entry(hp2m, gfn_x(gfn), &t, &old_a,
- P2M_ALLOC | P2M_UNSHARE, &page_order, NULL);
-
- if ( !mfn_valid(mfn) || t != p2m_ram_rw )
- goto out;
-
- /* If this is a superpage, copy that first */
- if ( page_order != PAGE_ORDER_4K )
- {
- gfn_t gfn2;
- unsigned long mask;
- mfn_t mfn2;
-
- mask = ~((1UL << page_order) - 1);
- gfn2 = _gfn(gfn_x(gfn) & mask);
- mfn2 = _mfn(mfn_x(mfn) & mask);
-
- if ( ap2m->set_entry(ap2m, gfn_x(gfn2), mfn2, page_order, t, old_a, 1) )
- goto out;
- }
- }
-
- if ( !ap2m->set_entry(ap2m, gfn_x(gfn), mfn, PAGE_ORDER_4K, t, req_a,
- (current->domain != d)) )
- rc = 0;
-
- out:
- p2m_unlock(ap2m);
- return rc;
-}
-
int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
gfn_t old_gfn, gfn_t new_gfn)
{
diff --git a/xen/common/mem_access.c b/xen/common/mem_access.c
index 159c036..92ebead 100644
--- a/xen/common/mem_access.c
+++ b/xen/common/mem_access.c
@@ -67,7 +67,7 @@ int mem_access_memop(unsigned long cmd,
break;
rc = p2m_set_mem_access(d, _gfn(mao.pfn), mao.nr, start_iter,
- MEMOP_CMD_MASK, mao.access);
+ MEMOP_CMD_MASK, mao.access, 0);
if ( rc > 0 )
{
ASSERT(!(rc & MEMOP_CMD_MASK));
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index fa46dd9..c0df1ea 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -808,10 +808,6 @@ int p2m_destroy_altp2m_by_id(struct domain *d, unsigned int idx);
/* Switch alternate p2m for entire domain */
int p2m_switch_domain_altp2m_by_id(struct domain *d, unsigned int idx);
-/* Set access type for a gfn */
-int p2m_set_altp2m_mem_access(struct domain *d, unsigned int idx,
- gfn_t gfn, xenmem_access_t access);
-
/* Change a gfn->mfn mapping */
int p2m_change_altp2m_gfn(struct domain *d, unsigned int idx,
gfn_t old_gfn, gfn_t new_gfn);
diff --git a/xen/include/xen/p2m-common.h b/xen/include/xen/p2m-common.h
index 47c40c7..8b70459 100644
--- a/xen/include/xen/p2m-common.h
+++ b/xen/include/xen/p2m-common.h
@@ -49,7 +49,8 @@ int unmap_mmio_regions(struct domain *d,
* If gfn == INVALID_GFN, sets the default access type.
*/
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
- uint32_t start, uint32_t mask, xenmem_access_t access);
+ uint32_t start, uint32_t mask, xenmem_access_t access,
+ unsigned int altp2m_idx);
/*
* Get access type for a gfn.
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog