Re: [PATCH 2/4] tools/tests: Unit test for p2m pool size


  • To: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
  • From: Jan Beulich <jbeulich@xxxxxxxx>
  • Date: Wed, 26 Oct 2022 16:24:47 +0200
  • Cc: Roger Pau Monné <roger.pau@xxxxxxxxxx>, Wei Liu <wl@xxxxxxx>, Stefano Stabellini <sstabellini@xxxxxxxxxx>, Julien Grall <julien@xxxxxxx>, Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>, Bertrand Marquis <bertrand.marquis@xxxxxxx>, Henry Wang <Henry.Wang@xxxxxxx>, Anthony PERARD <anthony.perard@xxxxxxxxxx>, Xen-devel <xen-devel@xxxxxxxxxxxxxxxxxxxx>
  • Delivery-date: Wed, 26 Oct 2022 14:25:00 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

On 26.10.2022 12:20, Andrew Cooper wrote:
> --- /dev/null
> +++ b/tools/tests/p2m-pool/Makefile
> @@ -0,0 +1,42 @@
> +XEN_ROOT = $(CURDIR)/../../..
> +include $(XEN_ROOT)/tools/Rules.mk
> +
> +TARGET := test-p2m-pool
> +
> +.PHONY: all
> +all: $(TARGET)
> +
> +.PHONY: clean
> +clean:
> +     $(RM) -- *.o $(TARGET) $(DEPS_RM)
> +
> +.PHONY: distclean
> +distclean: clean
> +     $(RM) -- *~
> +
> +.PHONY: install
> +install: all
> +     $(INSTALL_DIR) $(DESTDIR)$(LIBEXEC_BIN)
> +     $(INSTALL_PROG) $(TARGET) $(DESTDIR)$(LIBEXEC_BIN)
> +
> +.PHONY: uninstall
> +uninstall:
> +     $(RM) -- $(DESTDIR)$(LIBEXEC_BIN)/$(TARGET)
> +
> +CFLAGS += $(CFLAGS_xeninclude)
> +CFLAGS += $(CFLAGS_libxenctrl)
> +CFLAGS += $(CFLAGS_libxenforeginmemory)

Is the typo here, or does the same typo exist elsewhere too:
CFLAGS_libxenforeignmemory? I have to admit that I can't really spot
where these variables are populated. The one place in Rules.mk that I
could find uses

 CFLAGS_libxen$(1) = $$(CFLAGS_xeninclude)

i.e. the expansion doesn't actually depend on the library name.
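For reference, a minimal sketch of the generator pattern I'd expect
around that line (the helper name and library list here are
illustrative, not necessarily what tools/Rules.mk actually contains):

 # Hypothetical generator: defines CFLAGS_libxen<lib> for each library
 # name in turn.  Note $(1) only affects the variable *name*; every
 # instance expands to the same $(CFLAGS_xeninclude).
 define LIB_defs
 CFLAGS_libxen$(1) = $$(CFLAGS_xeninclude)
 endef

 $(foreach lib,ctrl foreignmemory gnttab,$(eval $(call LIB_defs,$(lib))))

Under such a pattern a misspelled suffix like "foreginmemory" would
merely expand to an empty, undefined variable rather than fail the
build, which would explain why the typo went unnoticed.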

That aside, this looks okay to me, except maybe for ...

> --- /dev/null
> +++ b/tools/tests/p2m-pool/test-p2m-pool.c
> @@ -0,0 +1,181 @@
> +#include <err.h>
> +#include <errno.h>
> +#include <inttypes.h>
> +#include <stdio.h>
> +#include <string.h>
> +#include <sys/mman.h>
> +
> +#include <xenctrl.h>
> +#include <xenforeignmemory.h>
> +#include <xengnttab.h>
> +#include <xen-tools/libs.h>
> +
> +static unsigned int nr_failures;
> +#define fail(fmt, ...)                          \
> +({                                              \
> +    nr_failures++;                              \
> +    (void)printf(fmt, ##__VA_ARGS__);           \
> +})
> +
> +static xc_interface *xch;
> +static uint32_t domid;
> +
> +static struct xen_domctl_createdomain create = {
> +    .flags = XEN_DOMCTL_CDF_hvm | XEN_DOMCTL_CDF_hap,
> +    .max_vcpus = 1,
> +    .max_grant_frames = 1,
> +    .grant_opts = XEN_DOMCTL_GRANT_version(1),
> +
> +    .arch = {
> +#if defined(__x86_64__) || defined(__i386__)
> +        .emulation_flags = XEN_X86_EMU_LAPIC,
> +#endif
> +    },
> +};
> +
> +static uint64_t default_p2m_size_bytes =
> +#if defined(__x86_64__) || defined(__i386__)
> +    256 << 12; /* Only x86 HAP for now.  x86 Shadow broken. */

... this shadow-related comment (the commit message could at least
say what's wrong there, to give a hint at what would need fixing to
remove this restriction) and ...

> +#elif defined (__arm__) || defined(__aarch64__)
> +    16 << 12;
> +#endif
> +
> +static void run_tests(void)
> +{
> +    xen_pfn_t physmap[] = { 0 };
> +    uint64_t size_bytes, old_size_bytes;
> +    int rc;
> +
> +    printf("Test default p2m mempool size\n");
> +
> +    rc = xc_get_p2m_mempool_size(xch, domid, &size_bytes);
> +    if ( rc )
> +        return fail("  Fail: get p2m mempool size: %d - %s\n",
> +                    errno, strerror(errno));
> +
> +    printf("P2M pool size %"PRIu64" bytes (%"PRIu64"kB, %"PRIu64"MB)\n",
> +           size_bytes, size_bytes >> 10, size_bytes >> 20);
> +
> +
> +    /*
> +     * Check that the domain has the expected default allocation size.  This
> +     * will fail if the logic in Xen is altered without an equivalent
> +     * adjustment here.
> +     */
> +    if ( size_bytes != default_p2m_size_bytes )
> +        return fail("  Fail: size %"PRIu64" != expected size %"PRIu64"\n",
> +                    size_bytes, default_p2m_size_bytes);
> +
> +
> +    printf("Test that allocate doesn't alter pool size\n");
> +
> +    /*
> +     * Populate the domain with some RAM.  This will cause more of the p2m
> +     * pool to be used.
> +     */
> +    old_size_bytes = size_bytes;
> +
> +    rc = xc_domain_setmaxmem(xch, domid, -1);
> +    if ( rc )
> +        return fail("  Fail: setmaxmem: : %d - %s\n",
> +                    errno, strerror(errno));
> +
> +    rc = xc_domain_populate_physmap_exact(xch, domid, 1, 0, 0, physmap);
> +    if ( rc )
> +        return fail("  Fail: populate physmap: %d - %s\n",
> +                    errno, strerror(errno));
> +
> +    /*
> +     * Re-get the p2m size.  Should not have changed as a consequence of
> +     * populate physmap.
> +     */
> +    rc = xc_get_p2m_mempool_size(xch, domid, &size_bytes);
> +    if ( rc )
> +        return fail("  Fail: get p2m mempool size: %d - %s\n",
> +                    errno, strerror(errno));
> +
> +    if ( old_size_bytes != size_bytes )
> +        return fail("  Fail: p2m mempool size changed %"PRIu64" => 
> %"PRIu64"\n",
> +                    old_size_bytes, size_bytes);
> +
> +
> +
> +    printf("Test bad set size\n");
> +
> +    /*
> +     * Check that setting a non-page size results in failure.
> +     */
> +    rc = xc_set_p2m_mempool_size(xch, domid, size_bytes + 1);
> +    if ( rc != -1 || errno != EINVAL )
> +        return fail("  Fail: Bad set size: expected -1/EINVAL, got %d/%d - 
> %s\n",
> +                    rc, errno, strerror(errno));
> +
> +
> +    printf("Test very large set size\n");
> +
> +    /*
> +     * Check that setting a large P2M size succeeds.  This is expecting to
> +     * trigger continuations.
> +     */
> +    rc = xc_set_p2m_mempool_size(xch, domid, 64 << 20);
> +    if ( rc )
> +        return fail("  Fail: Set size 64MB: %d - %s\n",
> +                    errno, strerror(errno));
> +
> +
> +    /*
> +     * Check that the reported size matches what set consumed.
> +     */
> +    rc = xc_get_p2m_mempool_size(xch, domid, &size_bytes);
> +    if ( rc )
> +        return fail("  Fail: get p2m mempool size: %d - %s\n",
> +                    errno, strerror(errno));
> +
> +    if ( size_bytes != 64 << 20 )
> +        return fail("  Fail: expected mempool size %u, got %"PRIu64"\n",
> +                    64 << 20, size_bytes);
> +}
> +
> +int main(int argc, char **argv)
> +{
> +    int rc;
> +
> +    printf("P2M Shadow memory pool tests\n");

... the question of why "Shadow" here.

Jan