|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH v2] xen/arm: Allow ballooning to work with 1:1 memory mapping
With the lack of an IOMMU, dom0 must have a 1:1 memory mapping for all
of its guest physical addresses. When the balloon driver decides to give
back a page to the kernel, this page must have the same address as
before. Otherwise, we will lose the 1:1 mapping and will break
DMA-capable devices.
Signed-off-by: Julien Grall <julien.grall@xxxxxxxxxx>
Cc: Keir Fraser <keir@xxxxxxx>
Cc: Jan Beulich <jbeulich@xxxxxxxx>
---
Release: This is a bug that prevents DMA-capable devices from working
after a guest has started.
Changes in v2
- Drop CONFIG_ARM and add is_dom0_mapped_11
---
xen/arch/arm/domain_build.c | 5 +++++
xen/common/memory.c | 32 ++++++++++++++++++++++++++++++--
xen/include/asm-arm/domain.h | 2 ++
xen/include/asm-x86/domain.h | 2 ++
4 files changed, 39 insertions(+), 2 deletions(-)
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index faff88e..72e24e6 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -65,6 +65,11 @@ struct vcpu *__init alloc_dom0_vcpu0(void)
return alloc_vcpu(dom0, 0, 0);
}
+int is_dom0_mapped_11(void)
+{
+ return dom0_11_mapping;
+}
+
static void allocate_memory_11(struct domain *d, struct kernel_info *kinfo)
{
paddr_t start;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 61791a4..43edd08 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -90,7 +90,7 @@ static void increase_reservation(struct memop_args *a)
static void populate_physmap(struct memop_args *a)
{
- struct page_info *page;
+ struct page_info *page = NULL;
unsigned long i, j;
xen_pfn_t gpfn, mfn;
struct domain *d = a->domain;
@@ -122,7 +122,29 @@ static void populate_physmap(struct memop_args *a)
}
else
{
- page = alloc_domheap_pages(d, a->extent_order, a->memflags);
+ if ( d == dom0 && is_dom0_mapped_11() )
+ {
+ mfn = gpfn;
+ if (!mfn_valid(mfn))
+ {
+ gdprintk(XENLOG_INFO, "Invalid mfn 0x%"PRI_xen_pfn"\n",
+ mfn);
+ goto out;
+ }
+
+ page = mfn_to_page(mfn);
+ if ( !get_page(page, d) )
+ {
+ gdprintk(XENLOG_INFO,
+ "mfn 0x%"PRI_xen_pfn" doesn't belong to dom0\n",
+ mfn);
+ goto out;
+ }
+ put_page(page);
+ }
+ else
+ page = alloc_domheap_pages(d, a->extent_order, a->memflags);
+
if ( unlikely(page == NULL) )
{
if ( !opt_tmem || (a->extent_order != 0) )
@@ -270,6 +292,12 @@ static void decrease_reservation(struct memop_args *a)
&& p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order)
)
continue;
+ /* With the lack of an IOMMU on some ARM platforms, dom0 must retrieve
+ * the same pfn when the hypercall populate_physmap is called.
+ */
+ if ( a->domain == dom0 && is_dom0_mapped_11() )
+ continue;
+
for ( j = 0; j < (1 << a->extent_order); j++ )
if ( !guest_remove_page(a->domain, gmfn + j) )
goto out;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 28d39a0..8a328d3 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -86,6 +86,8 @@ enum domain_type {
#define is_pv64_domain(d) (0)
#endif
+int is_dom0_mapped_11(void);
+
struct vtimer {
struct vcpu *v;
int irq;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9d39061..618aa48 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -16,6 +16,8 @@
#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
#define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
+#define is_dom0_mapped_11() (0)
+
#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |