[Xen-devel] [PATCH 08/11] x86/altp2m: add remaining support routines.
Add the remaining routines required to support enabling the alternate
p2m functionality.
Signed-off-by: Ed White <edmund.h.white@xxxxxxxxx>
---
xen/arch/x86/hvm/hvm.c | 12 ++
xen/arch/x86/mm/hap/altp2m_hap.c | 76 ++++++++
xen/arch/x86/mm/p2m.c | 339 ++++++++++++++++++++++++++++++++++++
xen/include/asm-x86/hvm/altp2mhvm.h | 6 +
xen/include/asm-x86/p2m.h | 26 +++
5 files changed, 459 insertions(+)
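
As context for review: the routines below are meant to be driven by the HVMOP
plumbing added elsewhere in this series. A minimal caller-side sketch of the
expected sequence, using only functions introduced in this patch (the wrapper
function and the literal index/pfn values are illustrative assumptions, not
part of the series):

/* Illustrative sketch only -- not part of this patch. */
static void altp2m_demo(struct domain *d)
{
    uint16_t idx;

    if ( !p2m_init_next_altp2m(d, &idx) )   /* claim an unused view */
        return;

    /* Make one pfn read-only in the new view (pfn value is made up). */
    p2m_set_altp2m_mem_access(d, idx, 0x1234, XENMEM_access_r);

    /* Move every vcpu of the domain onto the new view. */
    p2m_switch_domain_altp2m_by_id(d, idx);

    /* ... later: move vcpus back to view 0, then tear the view down
     * (destroy only succeeds once no vcpu is still using the view). */
    p2m_switch_domain_altp2m_by_id(d, 0);
    p2m_destroy_altp2m_by_id(d, idx);
}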
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e8787cc..e6f64a3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2782,6 +2782,18 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
goto out;
}
+ if ( altp2mhvm_active(v->domain) )
+ {
+ int rv = altp2mhvm_hap_nested_page_fault(v, gpa, gla, npfec);
+
+ switch ( rv )
+ {
+ case ALTP2MHVM_PAGEFAULT_DONE:
+ return 1;
+ case ALTP2MHVM_PAGEFAULT_CONTINUE:
+ break;
+ }
+ }
+
p2m = p2m_get_hostp2m(v->domain);
mfn = get_gfn_type_access(p2m, gfn, &p2mt, &p2ma,
P2M_ALLOC | (npfec.write_access ? P2M_UNSHARE : 0),
diff --git a/xen/arch/x86/mm/hap/altp2m_hap.c b/xen/arch/x86/mm/hap/altp2m_hap.c
index c2cdc42..b889626 100644
--- a/xen/arch/x86/mm/hap/altp2m_hap.c
+++ b/xen/arch/x86/mm/hap/altp2m_hap.c
@@ -29,6 +29,8 @@
#include <asm/hap.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/altp2mhvm.h>
+
#include "private.h"
/* Override macros from asm/page.h to make them work with mfn_t */
@@ -56,6 +58,80 @@ altp2m_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
}
/*
+ * If the fault is for a not-present entry:
+ * if the entry is present in the host p2m and is ram, copy it and retry;
+ * else indicate that the outer handler should handle the fault.
+ *
+ * If the fault is for a present entry:
+ * if the page type is not p2m_ram_rw_ve, crash the domain;
+ * else if hardware does not support #VE, emulate it and retry;
+ * else crash the domain.
+ */
+
+int
+altp2mhvm_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
+ unsigned long gla, struct npfec npfec)
+{
+ struct domain *d = v->domain;
+ struct p2m_domain *hp2m = p2m_get_hostp2m(d);
+ struct p2m_domain *ap2m;
+ p2m_type_t p2mt;
+ p2m_access_t p2ma;
+ unsigned int page_order;
+ unsigned long gfn, mask;
+ mfn_t mfn;
+ int rv;
+
+ ap2m = p2m_get_altp2m(v);
+
+ mfn = get_gfn_type_access(ap2m, gpa >> PAGE_SHIFT, &p2mt, &p2ma,
+ 0, &page_order);
+ __put_gfn(ap2m, gpa >> PAGE_SHIFT);
+
+ if ( mfn_valid(mfn) )
+ {
+ /* Should #VE be emulated for this fault? */
+ if ( p2mt == p2m_ram_rw_ve && !cpu_has_vmx_virt_exceptions &&
+ ahvm_vcpu_emulate_ve(v) )
+ return ALTP2MHVM_PAGEFAULT_DONE;
+
+ /* Could not handle fault here */
+ gdprintk(XENLOG_INFO, "Altp2m: could not handle memory access "
+ "permissions failure, vcpu %d, dom %d\n",
+ v->vcpu_id, d->domain_id);
+ domain_crash(v->domain);
+ return ALTP2MHVM_PAGEFAULT_CONTINUE;
+ }
+
+ mfn = get_gfn_type_access(hp2m, gpa >> PAGE_SHIFT, &p2mt, &p2ma,
+ 0, &page_order);
+ put_gfn(hp2m->domain, gpa >> PAGE_SHIFT);
+
+ if ( p2mt != p2m_ram_rw || p2ma != p2m_access_rwx )
+ return ALTP2MHVM_PAGEFAULT_CONTINUE;
+
+ p2m_lock(ap2m);
+
+ /*
+ * If this is a superpage mapping, round down both frame numbers
+ * to the start of the superpage.
+ */
+ mask = ~((1UL << page_order) - 1);
+ gfn = (gpa >> PAGE_SHIFT) & mask;
+ mfn = _mfn(mfn_x(mfn) & mask);
+
+ rv = p2m_set_entry(ap2m, gfn, mfn, page_order, p2mt, p2ma);
+ p2m_unlock(ap2m);
+
+ if ( rv )
+ {
+ gdprintk(XENLOG_ERR,
+ "failed to set entry for %#"PRIx64" -> %#"PRIx64"\n",
+ gpa, mfn_x(mfn));
+ domain_crash(hp2m->domain);
+ }
+
+ return ALTP2MHVM_PAGEFAULT_DONE;
+}
+
+/*
* Local variables:
* mode: C
* c-file-style: "BSD"
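
The superpage path in altp2mhvm_hap_nested_page_fault() rounds both the guest
and machine frame numbers down to the first frame of the superpage before
copying the host mapping. A standalone sketch of that mask arithmetic (the
concrete numbers are illustrative):

/* Illustrative sketch only -- not part of this patch. */
#include <stdio.h>

int main(void)
{
    unsigned int page_order = 9;    /* a 2M superpage covers 512 4K frames */
    unsigned long gfn = 0x12345;    /* faulting guest frame, made up */
    unsigned long mask = ~((1UL << page_order) - 1);

    /* 0x12345 & ~0x1ff == 0x12200, the first frame of the superpage;
     * the same mask is applied to the mfn so the mapping stays aligned. */
    printf("superpage start: %#lx\n", gfn & mask);
    return 0;
}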
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 3c6049b..44bf1ad 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1993,6 +1993,345 @@ bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, uint16_t idx)
return rc;
}
+void p2m_flush_altp2m(struct domain *d)
+{
+ uint16_t i;
+
+ altp2m_lock(d);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ p2m_flush_table(d->arch.altp2m_p2m[i]);
+ d->arch.altp2m_eptp[i] = ~0ul;
+ }
+
+ altp2m_unlock(d);
+}
+
+bool_t p2m_init_altp2m_by_id(struct domain *d, uint16_t idx)
+{
+ struct p2m_domain *p2m;
+ struct ept_data *ept;
+ bool_t rc = 0;
+
+ if ( idx >= MAX_ALTP2M )
+ return rc;
+
+ altp2m_lock(d);
+
+ if ( d->arch.altp2m_eptp[idx] == ~0ul )
+ {
+ p2m = d->arch.altp2m_p2m[idx];
+ ept = &p2m->ept;
+ ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ d->arch.altp2m_eptp[idx] = ept_get_eptp(ept);
+ rc = 1;
+ }
+
+ altp2m_unlock(d);
+ return rc;
+}
+
+bool_t p2m_init_next_altp2m(struct domain *d, uint16_t *idx)
+{
+ struct p2m_domain *p2m;
+ struct ept_data *ept;
+ bool_t rc = 0;
+ uint16_t i;
+
+ altp2m_lock(d);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ if ( d->arch.altp2m_eptp[i] != ~0ul )
+ continue;
+
+ p2m = d->arch.altp2m_p2m[i];
+ ept = &p2m->ept;
+ ept->asr = pagetable_get_pfn(p2m_get_pagetable(p2m));
+ d->arch.altp2m_eptp[i] = ept_get_eptp(ept);
+ *idx = i;
+ rc = 1;
+
+ break;
+ }
+
+ altp2m_unlock(d);
+ return rc;
+}
+
+bool_t p2m_destroy_altp2m_by_id(struct domain *d, uint16_t idx)
+{
+ struct p2m_domain *p2m;
+ struct vcpu *curr = current;
+ struct vcpu *v;
+ bool_t rc = 0;
+
+ if ( !idx || idx >= MAX_ALTP2M )
+ return rc;
+
+ if ( curr->domain != d )
+ domain_pause(d);
+ else
+ for_each_vcpu( d, v )
+ if ( curr != v )
+ vcpu_pause(v);
+
+ altp2m_lock(d);
+
+ if ( d->arch.altp2m_eptp[idx] != ~0ul )
+ {
+ p2m = d->arch.altp2m_p2m[idx];
+
+ if ( !cpumask_weight(p2m->dirty_cpumask) )
+ {
+ p2m_flush_table(d->arch.altp2m_p2m[idx]);
+ d->arch.altp2m_eptp[idx] = ~0ul;
+ rc = 1;
+ }
+ }
+
+ altp2m_unlock(d);
+
+ if ( curr->domain != d )
+ domain_unpause(d);
+ else
+ for_each_vcpu( d, v )
+ if ( curr != v )
+ vcpu_unpause(v);
+
+ return rc;
+}
+
+bool_t p2m_switch_domain_altp2m_by_id(struct domain *d, uint16_t idx)
+{
+ struct vcpu *curr = current;
+ struct vcpu *v;
+ bool_t rc = 0;
+
+ if ( idx >= MAX_ALTP2M )
+ return rc;
+
+ if ( curr->domain != d )
+ domain_pause(d);
+ else
+ for_each_vcpu( d, v )
+ if ( curr != v )
+ vcpu_pause(v);
+
+ altp2m_lock(d);
+
+ if ( d->arch.altp2m_eptp[idx] != ~0ul )
+ {
+ for_each_vcpu( d, v )
+ if ( idx != vcpu_altp2mhvm(v).p2midx )
+ {
+ cpumask_clear_cpu(v->vcpu_id, p2m_get_altp2m(v)->dirty_cpumask);
+ vcpu_altp2mhvm(v).p2midx = idx;
+ cpumask_set_cpu(v->vcpu_id, p2m_get_altp2m(v)->dirty_cpumask);
+ ahvm_vcpu_update_eptp(v);
+ }
+
+ rc = 1;
+ }
+
+ altp2m_unlock(d);
+
+ if ( curr->domain != d )
+ domain_unpause(d);
+ else
+ for_each_vcpu( d, v )
+ if ( curr != v )
+ vcpu_unpause(v);
+
+ return rc;
+}
+
+bool_t p2m_set_altp2m_mem_access(struct domain *d, uint16_t idx,
+ unsigned long pfn, xenmem_access_t access)
+{
+ struct p2m_domain *hp2m, *ap2m;
+ p2m_access_t a, _a;
+ p2m_type_t t;
+ mfn_t mfn;
+ unsigned int page_order;
+ bool_t rc = 0;
+
+ static const p2m_access_t memaccess[] = {
+#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
+ ACCESS(n),
+ ACCESS(r),
+ ACCESS(w),
+ ACCESS(rw),
+ ACCESS(x),
+ ACCESS(rx),
+ ACCESS(wx),
+ ACCESS(rwx),
+#undef ACCESS
+ };
+
+ if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == ~0ul )
+ return 0;
+
+ ap2m = d->arch.altp2m_p2m[idx];
+
+ switch ( access )
+ {
+ case 0 ... ARRAY_SIZE(memaccess) - 1:
+ a = memaccess[access];
+ break;
+ case XENMEM_access_default:
+ a = ap2m->default_access;
+ break;
+ default:
+ return 0;
+ }
+
+ /* If request to set default access */
+ if ( pfn == ~0ul )
+ {
+ ap2m->default_access = a;
+ return 1;
+ }
+
+ hp2m = p2m_get_hostp2m(d);
+
+ p2m_lock(ap2m);
+
+ mfn = ap2m->get_entry(ap2m, pfn, &t, &_a, 0, NULL);
+
+ /* Check host p2m if no valid entry in alternate */
+ if ( !mfn_valid(mfn) )
+ {
+ mfn = hp2m->get_entry(hp2m, pfn, &t, &_a, 0, &page_order);
+
+ if ( !mfn_valid(mfn) || t != p2m_ram_rw )
+ goto out;
+
+ /* If this is a superpage, copy that first */
+ if ( page_order != PAGE_ORDER_4K )
+ {
+ unsigned long gfn, mask;
+ mfn_t mfn2;
+
+ mask = ~((1UL << page_order) - 1);
+ gfn = pfn & mask;
+ mfn2 = _mfn(mfn_x(mfn) & mask);
+
+ if ( ap2m->set_entry(ap2m, gfn, mfn2, page_order, t, _a) )
+ goto out;
+ }
+ }
+
+ /* Use special ram type to enable #VE if setting for current domain */
+ if ( current->domain == d )
+ t = p2m_ram_rw_ve;
+
+ if ( !ap2m->set_entry(ap2m, pfn, mfn, PAGE_ORDER_4K, t, a) )
+ rc = 1;
+
+out:
+ p2m_unlock(ap2m);
+ return rc;
+}
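
A caller-side sketch of the two modes this routine supports -- a single pfn,
or (via pfn == ~0ul) the view's default access; the index and pfn values are
illustrative:

/* Illustrative sketch only -- not part of this patch. */
/* Make pfn 0x1000 execute-only in view 1: */
p2m_set_altp2m_mem_access(d, 1, 0x1000, XENMEM_access_x);

/* Set view 1's default access instead of touching any single pfn: */
p2m_set_altp2m_mem_access(d, 1, ~0ul, XENMEM_access_rwx);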
+
+bool_t p2m_change_altp2m_pfn(struct domain *d, uint16_t idx,
+ unsigned long old_pfn, unsigned long new_pfn)
+{
+ struct p2m_domain *hp2m, *ap2m;
+ p2m_access_t a;
+ p2m_type_t t;
+ mfn_t mfn;
+ unsigned int page_order;
+ bool_t rc = 0;
+
+ if ( idx >= MAX_ALTP2M || d->arch.altp2m_eptp[idx] == ~0ul )
+ return 0;
+
+ hp2m = p2m_get_hostp2m(d);
+ ap2m = d->arch.altp2m_p2m[idx];
+
+ p2m_lock(ap2m);
+
+ mfn = ap2m->get_entry(ap2m, old_pfn, &t, &a, 0, NULL);
+
+ if ( new_pfn == ~0ul )
+ {
+ if ( mfn_valid(mfn) )
+ p2m_remove_page(ap2m, old_pfn, mfn_x(mfn), PAGE_ORDER_4K);
+ rc = 1;
+ goto out;
+ }
+
+ /* Check host p2m if no valid entry in alternate */
+ if ( !mfn_valid(mfn) )
+ {
+ mfn = hp2m->get_entry(hp2m, old_pfn, &t, &a, 0, &page_order);
+
+ if ( !mfn_valid(mfn) || t != p2m_ram_rw )
+ goto out;
+
+ /* If this is a superpage, copy that first */
+ if ( page_order != PAGE_ORDER_4K )
+ {
+ unsigned long gfn, mask;
+
+ mask = ~((1UL << page_order) - 1);
+ gfn = old_pfn & mask;
+ mfn = _mfn(mfn_x(mfn) & mask);
+
+ if ( ap2m->set_entry(ap2m, gfn, mfn, page_order, t, a) )
+ goto out;
+ }
+ }
+
+ mfn = ap2m->get_entry(ap2m, new_pfn, &t, &a, 0, NULL);
+
+ if ( !mfn_valid(mfn) )
+ mfn = hp2m->get_entry(hp2m, new_pfn, &t, &a, 0, NULL);
+
+ if ( !mfn_valid(mfn) || !(t == p2m_ram_rw || t == p2m_ram_rw_ve) )
+ goto out;
+
+ /* Use special ram type to enable #VE if setting for current domain */
+ if ( current->domain == d )
+ t = p2m_ram_rw_ve;
+
+ if ( !ap2m->set_entry(ap2m, old_pfn, mfn, PAGE_ORDER_4K, t, a) )
+ rc = 1;
+
+out:
+ p2m_unlock(ap2m);
+ return rc;
+}
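
Caller-side sketch of the remap and its reset (values illustrative):
remapping makes old_pfn translate to new_pfn's machine frame in this one
view; passing ~0ul drops the entry so the next access falls back to the
host p2m:

/* Illustrative sketch only -- not part of this patch. */
/* In view 1, back pfn 0x2000 with the frame behind pfn 0x3000: */
p2m_change_altp2m_pfn(d, 1, 0x2000, 0x3000);

/* Undo it; 0x2000 is re-populated from the host p2m on the next fault: */
p2m_change_altp2m_pfn(d, 1, 0x2000, ~0ul);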
+
+void p2m_remove_altp2m_page(struct domain *d, unsigned long gfn)
+{
+ struct p2m_domain *p2m;
+ p2m_access_t a;
+ p2m_type_t t;
+ mfn_t mfn;
+ uint16_t i;
+
+ altp2m_lock(d);
+
+ for ( i = 0; i < MAX_ALTP2M; i++ )
+ {
+ if ( d->arch.altp2m_eptp[i] == ~0ul )
+ continue;
+
+ p2m = d->arch.altp2m_p2m[i];
+ mfn = get_gfn_type_access(p2m, gfn, &t, &a, 0, NULL);
+
+ if ( mfn_valid(mfn) )
+ p2m_remove_page(p2m, gfn, mfn_x(mfn), PAGE_ORDER_4K);
+
+ __put_gfn(p2m, gfn);
+ }
+
+ altp2m_unlock(d);
+}
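
Usage sketch (an assumption about how the rest of the series uses this
helper): after the host p2m entry for a gfn changes, invalidating the gfn in
every active view lets the fault handler lazily re-copy the new host entry:

/* Illustrative sketch only -- not part of this patch. */
p2m_remove_altp2m_page(d, gfn);
/* Any later guest access through an active view now faults not-present
 * and is re-populated from the host p2m by
 * altp2mhvm_hap_nested_page_fault(). */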
+
/*** Audit ***/
#if P2M_AUDIT
diff --git a/xen/include/asm-x86/hvm/altp2mhvm.h b/xen/include/asm-x86/hvm/altp2mhvm.h
index 919986e..f752815 100644
--- a/xen/include/asm-x86/hvm/altp2mhvm.h
+++ b/xen/include/asm-x86/hvm/altp2mhvm.h
@@ -32,5 +32,11 @@ int altp2mhvm_vcpu_initialise(struct vcpu *v);
void altp2mhvm_vcpu_destroy(struct vcpu *v);
void altp2mhvm_vcpu_reset(struct vcpu *v);
+/* Alternate p2m paging */
+#define ALTP2MHVM_PAGEFAULT_DONE 0
+#define ALTP2MHVM_PAGEFAULT_CONTINUE 1
+int altp2mhvm_hap_nested_page_fault(struct vcpu *v, paddr_t gpa,
+ unsigned long gla, struct npfec npfec);
+
#endif /* _HVM_ALTP2M_H */
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index 68a5f80..52588ed 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -711,6 +711,32 @@ bool_t p2m_find_altp2m_by_eptp(struct domain *d, uint64_t eptp, unsigned long *i
/* Switch alternate p2m for a single vcpu */
bool_t p2m_switch_vcpu_altp2m_by_id(struct vcpu *v, uint16_t idx);
+/* Flush all the alternate p2m's for a domain */
+void p2m_flush_altp2m(struct domain *d);
+
+/* Make a specific alternate p2m valid */
+bool_t p2m_init_altp2m_by_id(struct domain *d, uint16_t idx);
+
+/* Find an available alternate p2m and make it valid */
+bool_t p2m_init_next_altp2m(struct domain *d, uint16_t *idx);
+
+/* Make a specific alternate p2m invalid */
+bool_t p2m_destroy_altp2m_by_id(struct domain *d, uint16_t idx);
+
+/* Switch alternate p2m for entire domain */
+bool_t p2m_switch_domain_altp2m_by_id(struct domain *d, uint16_t idx);
+
+/* Set access type for a pfn */
+bool_t p2m_set_altp2m_mem_access(struct domain *d, uint16_t idx,
+ unsigned long pfn, xenmem_access_t access);
+
+/* Replace a pfn with a different pfn */
+bool_t p2m_change_altp2m_pfn(struct domain *d, uint16_t idx,
+ unsigned long old_pfn, unsigned long new_pfn);
+
+/* Invalidate a page in all alternate p2m's */
+void p2m_remove_altp2m_page(struct domain *d, unsigned long gfn);
+
/*
* p2m type to IOMMU flags
*/
--
1.9.1
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel