Re: [Xen-devel] [PATCH v2 3/3] tools/libxc: use superpages during restore of HVM guest
On Wed, Aug 23, Olaf Hering wrote:
> The value of p2m_size does not represent the actual number of pages
> assigned to a domU. This info is stored in getdomaininfo.max_pages,
> which is currently not used by restore. I will see if using this value
> will avoid triggering the Over-allocation check.
This untested change on top of this series (done with git diff -w -b
base..HEAD) adds some accounting to avoid triggering the Over-allocation
check. The core of the idea is sketched just below; the actual diff follows it:
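As a rough standalone sketch (illustrative names only, not the actual libxc
structures): keep a running count of pages the domain already owns and only
attempt a superpage extent while it still fits below the max_pages limit
reported by getdomaininfo.

#include <stdbool.h>

/* Illustration only: placeholder names, not the libxc fields used below. */
struct restore_acct {
    unsigned long max_pages;   /* allowance from getdomaininfo (max_memkb >> 2) */
    unsigned long tot_pages;   /* pages populated so far */
};

/* Try an order-N extent only if it cannot push the domain past its
 * maximum allocation, which would trigger the Over-allocation check. */
static bool may_populate(const struct restore_acct *acct, unsigned int order)
{
    unsigned long count = 1UL << order;
    return acct->tot_pages + count <= acct->max_pages;
}

The actual change: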
diff --git a/tools/libxc/xc_sr_common.h b/tools/libxc/xc_sr_common.h
index 26c45fdd6d..e0321ea224 100644
--- a/tools/libxc/xc_sr_common.h
+++ b/tools/libxc/xc_sr_common.h
@@ -234,6 +234,8 @@ struct xc_sr_context
int send_back_fd;
unsigned long p2m_size;
+ unsigned long max_pages;
+ unsigned long tot_pages;
xc_hypercall_buffer_t dirty_bitmap_hbuf;
/* From Image Header. */
@@ -375,6 +377,7 @@ static inline bool xc_sr_bitmap_resize(struct xc_sr_bitmap *bm, unsigned long bi
static inline void xc_sr_bitmap_free(struct xc_sr_bitmap *bm)
{
free(bm->p);
+ bm->p = NULL;
}
static inline bool xc_sr_set_bit(unsigned long bit, struct xc_sr_bitmap *bm)
diff --git a/tools/libxc/xc_sr_restore.c b/tools/libxc/xc_sr_restore.c
index 1f9fe25b8f..eff24d3805 100644
--- a/tools/libxc/xc_sr_restore.c
+++ b/tools/libxc/xc_sr_restore.c
@@ -758,6 +758,9 @@ int xc_domain_restore(xc_interface *xch, int io_fd, uint32_t dom,
return -1;
}
+ /* See xc_domain_getinfo */
+ ctx.restore.max_pages = ctx.dominfo.max_memkb >> (PAGE_SHIFT-10);
+ ctx.restore.tot_pages = ctx.dominfo.nr_pages;
ctx.restore.p2m_size = nr_pfns;
if ( ctx.dominfo.hvm )
diff --git a/tools/libxc/xc_sr_restore_x86_hvm.c b/tools/libxc/xc_sr_restore_x86_hvm.c
index 60454148db..f2932dafb7 100644
--- a/tools/libxc/xc_sr_restore_x86_hvm.c
+++ b/tools/libxc/xc_sr_restore_x86_hvm.c
@@ -278,7 +278,8 @@ static int pfn_set_allocated(struct xc_sr_context *ctx, xen_pfn_t pfn)
static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
{
xc_interface *xch = ctx->xch;
- bool success = false;
+ struct xc_sr_bitmap *bm;
+ bool success = false, do_sp;
int rc = -1, done;
unsigned int order;
unsigned long i;
@@ -303,15 +304,18 @@ static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
return -1;
}
DPRINTF("idx_1g %lu idx_2m %lu\n", idx_1g, idx_2m);
- if (!xc_sr_test_and_set_bit(idx_1g, &ctx->x86_hvm.restore.attempted_1g)) {
+
+ bm = &ctx->x86_hvm.restore.attempted_1g;
order = SUPERPAGE_1GB_SHIFT;
count = 1UL << order;
+ do_sp = ctx->restore.tot_pages + count <= ctx->restore.max_pages;
+ if ( do_sp && !xc_sr_test_and_set_bit(idx_1g, bm) ) {
base_pfn = (pfn >> order) << order;
extnt = base_pfn;
done = xc_domain_populate_physmap(xch, ctx->domid, 1, order, 0, &extnt);
DPRINTF("1G base_pfn %" PRI_xen_pfn " done %d\n", base_pfn, done);
if ( done > 0 ) {
- struct xc_sr_bitmap *bm = &ctx->x86_hvm.restore.attempted_2m;
+ bm = &ctx->x86_hvm.restore.attempted_2m;
success = true;
stat_1g = done;
for ( i = 0; i < (count >> SUPERPAGE_2MB_SHIFT); i++ )
@@ -319,9 +323,11 @@ static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
}
}
- if (!xc_sr_test_and_set_bit(idx_2m, &ctx->x86_hvm.restore.attempted_2m)) {
+ bm = &ctx->x86_hvm.restore.attempted_2m;
order = SUPERPAGE_2MB_SHIFT;
count = 1UL << order;
+ do_sp = ctx->restore.tot_pages + count <= ctx->restore.max_pages;
+ if ( do_sp && !xc_sr_test_and_set_bit(idx_2m, bm) ) {
base_pfn = (pfn >> order) << order;
extnt = base_pfn;
done = xc_domain_populate_physmap(xch, ctx->domid, 1, order, 0, &extnt);
@@ -344,6 +350,7 @@ static int x86_hvm_allocate_pfn(struct xc_sr_context *ctx, xen_pfn_t pfn)
if ( success == true ) {
do {
count--;
+ ctx->restore.tot_pages++;
rc = pfn_set_allocated(ctx, base_pfn + count);
if ( rc )
break;
@@ -396,6 +403,7 @@ static int x86_hvm_populate_pfns(struct xc_sr_context *ctx, unsigned count,
PERROR("Failed to release pfn %" PRI_xen_pfn, min_pfn);
goto err;
}
+ ctx->restore.tot_pages--;
}
min_pfn++;
}
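For reference, the max_pages value above is derived from getdomaininfo's
max_memkb; with 4 KiB pages (PAGE_SHIFT == 12) the shift by (PAGE_SHIFT - 10)
== 2 converts kB into a page count, e.g. (standalone example, values made up):

#include <stdio.h>

#define PAGE_SHIFT 12  /* 4 KiB pages on x86 */

int main(void)
{
    unsigned long max_memkb = 1048576; /* example: 1 GiB allowance */
    unsigned long max_pages = max_memkb >> (PAGE_SHIFT - 10); /* kB -> pages */
    printf("%lu kB -> %lu pages\n", max_memkb, max_pages); /* 262144 pages */
    return 0;
}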
Olaf