[PATCH 5/7] tools/xg: Streamline xc_cpuid_apply_policy()
Instantiate host, default and domain policies in order to clean up a lot
of boilerplate hypercalls.

This is partial work towards deduplicating the hypercalls used when
updating the CPUID and MSR parts of the policy.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@xxxxxxxxx>
---
 tools/include/xenguest.h        |   1 +
 tools/libs/guest/xg_cpuid_x86.c | 184 ++++++++++++++++----------------
 2 files changed, 93 insertions(+), 92 deletions(-)

diff --git a/tools/include/xenguest.h b/tools/include/xenguest.h
index a66d9f7807..f0b58bb395 100644
--- a/tools/include/xenguest.h
+++ b/tools/include/xenguest.h
@@ -788,6 +788,7 @@ typedef struct xc_cpu_policy xc_cpu_policy_t;
 
 /* Create and free a xc_cpu_policy object. */
 xc_cpu_policy_t *xc_cpu_policy_init(xc_interface *xch);
+xc_cpu_policy_t *xc_cpu_policy_clone(const xc_cpu_policy_t *other);
 void xc_cpu_policy_destroy(xc_cpu_policy_t *policy);
 
 /* Retrieve a system policy, or get/set a domains policy. */
diff --git a/tools/libs/guest/xg_cpuid_x86.c b/tools/libs/guest/xg_cpuid_x86.c
index 8fafeb2a02..acc94fb16b 100644
--- a/tools/libs/guest/xg_cpuid_x86.c
+++ b/tools/libs/guest/xg_cpuid_x86.c
@@ -635,13 +635,14 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
     int rc;
     bool hvm;
     xc_domaininfo_t di;
-    unsigned int i, nr_leaves, nr_msrs;
-    xen_cpuid_leaf_t *leaves = NULL;
-    struct cpu_policy *p = NULL;
-    uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
-    uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
-    uint32_t len = ARRAY_SIZE(host_featureset);
+    uint32_t def_policy;
+    /*
+     * Three full policies. The host, default for the domain type,
+     * and domain current.
+     */
+    xc_cpu_policy_t *host = NULL, *def = NULL, *cur = NULL;
+
     /* Determine if domid is PV or HVM */
     if ( (rc = xc_domain_getinfo_single(xch, domid, &di)) < 0 )
     {
         PERROR("Failed to obtain d%d info", domid);
@@ -650,48 +651,24 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
     }
     hvm = di.flags & XEN_DOMINF_hvm_guest;
 
-    rc = xc_cpu_policy_get_size(xch, &nr_leaves, &nr_msrs);
-    if ( rc )
-    {
-        PERROR("Failed to obtain policy info size");
-        rc = -errno;
-        goto out;
-    }
-
-    rc = -ENOMEM;
-    if ( (leaves = calloc(nr_leaves, sizeof(*leaves))) == NULL ||
-         (p = calloc(1, sizeof(*p))) == NULL )
-        goto out;
-
-    /* Get the host policy. */
-    rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
-                               &len, host_featureset);
-    /* Tolerate "buffer too small", as we've got the bits we need. */
-    if ( rc && errno != ENOBUFS )
+    if ( !(host = xc_cpu_policy_init(xch)) ||
+         !(def = xc_cpu_policy_clone(host)) ||
+         !(cur = xc_cpu_policy_clone(host)) )
     {
-        PERROR("Failed to obtain host featureset");
-        rc = -errno;
+        PERROR("Failed to allocate policy state");
         goto out;
     }
 
-    /* Get the domain's default policy. */
-    nr_msrs = 0;
-    rc = get_system_cpu_policy(xch, hvm ? XEN_SYSCTL_cpu_policy_hvm_default
-                                        : XEN_SYSCTL_cpu_policy_pv_default,
-                               &nr_leaves, leaves, &nr_msrs, NULL);
-    if ( rc )
-    {
-        PERROR("Failed to obtain %s default policy", hvm ? "hvm" : "pv");
-        rc = -errno;
-        goto out;
-    }
+    def_policy = hvm ? XEN_SYSCTL_cpu_policy_hvm_default
+                     : XEN_SYSCTL_cpu_policy_pv_default;
 
-    rc = x86_cpuid_copy_from_buffer(p, leaves, nr_leaves,
-                                    &err_leaf, &err_subleaf);
-    if ( rc )
+    /* Get the domain type's default policy. */
+    if ( (rc = xc_cpu_policy_get_domain(xch, domid, cur)) ||
+         (rc = xc_cpu_policy_get_system(xch, def_policy, def)) ||
+         (rc = xc_cpu_policy_get_system(xch, XEN_SYSCTL_cpu_policy_host, host)) )
     {
-        ERROR("Failed to deserialise CPUID (err leaf %#x, subleaf %#x) (%d = %s)",
-              err_leaf, err_subleaf, -rc, strerror(-rc));
+        PERROR("Failed to obtain d%d, %s and/or host policies",
+               domid, hvm ? "hvm" : "pv");
         goto out;
     }
 
@@ -711,18 +688,16 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
          * - Re-enable features which have become (possibly) off by default.
          */
 
-        p->basic.rdrand = test_bit(X86_FEATURE_RDRAND, host_featureset);
-        p->feat.hle = test_bit(X86_FEATURE_HLE, host_featureset);
-        p->feat.rtm = test_bit(X86_FEATURE_RTM, host_featureset);
+        cur->policy.basic.rdrand = host->policy.basic.rdrand;
+        cur->policy.feat.hle = host->policy.feat.hle;
+        cur->policy.feat.rtm = host->policy.feat.rtm;
 
         if ( hvm )
-        {
-            p->feat.mpx = test_bit(X86_FEATURE_MPX, host_featureset);
-        }
+            cur->policy.feat.mpx = host->policy.feat.mpx;
 
-        p->basic.max_leaf = min(p->basic.max_leaf, 0xdu);
-        p->feat.max_subleaf = 0;
-        p->extd.max_leaf = min(p->extd.max_leaf, 0x8000001c);
+        cur->policy.basic.max_leaf = min(host->policy.basic.max_leaf, 0xdu);
+        cur->policy.feat.max_subleaf = 0;
+        cur->policy.extd.max_leaf = min(host->policy.extd.max_leaf, 0x8000001c);
     }
 
     if ( featureset )
@@ -730,7 +705,6 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
         uint32_t disabled_features[FEATURESET_NR_ENTRIES],
             feat[FEATURESET_NR_ENTRIES] = {};
         static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
-        unsigned int i, b;
 
         /*
          * The user supplied featureset may be shorter or longer than
@@ -741,17 +715,17 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
 
         /* Check for truncated set bits. */
         rc = -EOPNOTSUPP;
-        for ( i = user_len; i < nr_features; ++i )
+        for ( size_t i = user_len; i < nr_features; ++i )
            if ( featureset[i] != 0 )
                goto out;
 
        memcpy(feat, featureset, sizeof(*featureset) * user_len);
 
        /* Disable deep dependencies of disabled features. */
-        for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
+        for ( size_t i = 0; i < ARRAY_SIZE(disabled_features); ++i )
            disabled_features[i] = ~feat[i] & deep_features[i];
 
-        for ( b = 0; b < sizeof(disabled_features) * CHAR_BIT; ++b )
+        for ( size_t b = 0; b < sizeof(disabled_features) * CHAR_BIT; ++b )
        {
            const uint32_t *dfs;
 
@@ -759,24 +733,24 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
                  !(dfs = x86_cpu_policy_lookup_deep_deps(b)) )
                 continue;
 
-            for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
+            for ( size_t i = 0; i < ARRAY_SIZE(disabled_features); ++i )
             {
                 feat[i] &= ~dfs[i];
                 disabled_features[i] &= ~dfs[i];
             }
         }
 
-        x86_cpu_featureset_to_policy(feat, p);
+        x86_cpu_featureset_to_policy(feat, &cur->policy);
     }
     else
     {
-        p->extd.itsc = itsc;
+        cur->policy.extd.itsc = itsc;
 
         if ( hvm )
         {
-            p->basic.pae = pae;
-            p->basic.vmx = nested_virt;
-            p->extd.svm = nested_virt;
+            cur->policy.basic.pae = pae;
+            cur->policy.basic.vmx = nested_virt;
+            cur->policy.extd.svm = nested_virt;
         }
     }
 
@@ -786,8 +760,8 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
          * On hardware without CPUID Faulting, PV guests see real topology.
          * As a consequence, they also need to see the host htt/cmp fields.
          */
-        p->basic.htt = test_bit(X86_FEATURE_HTT, host_featureset);
-        p->extd.cmp_legacy = test_bit(X86_FEATURE_CMP_LEGACY, host_featureset);
+        cur->policy.basic.htt = host->policy.basic.htt;
+        cur->policy.extd.cmp_legacy = host->policy.extd.cmp_legacy;
     }
     else
     {
@@ -795,28 +769,28 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
          * Topology for HVM guests is entirely controlled by Xen. For now, we
          * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
          */
-        p->basic.htt = true;
-        p->extd.cmp_legacy = false;
+        cur->policy.basic.htt = true;
+        cur->policy.extd.cmp_legacy = false;
 
         /*
          * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
          * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
          * overflow.
          */
-        if ( !p->basic.lppp )
-            p->basic.lppp = 2;
-        else if ( !(p->basic.lppp & 0x80) )
-            p->basic.lppp *= 2;
+        if ( !cur->policy.basic.lppp )
+            cur->policy.basic.lppp = 2;
+        else if ( !(cur->policy.basic.lppp & 0x80) )
+            cur->policy.basic.lppp *= 2;
 
-        switch ( p->x86_vendor )
+        switch ( cur->policy.x86_vendor )
         {
         case X86_VENDOR_INTEL:
-            for ( i = 0; (p->cache.subleaf[i].type &&
-                          i < ARRAY_SIZE(p->cache.raw)); ++i )
+            for ( size_t i = 0; (cur->policy.cache.subleaf[i].type &&
+                                 i < ARRAY_SIZE(cur->policy.cache.raw)); ++i )
             {
-                p->cache.subleaf[i].cores_per_package =
-                    (p->cache.subleaf[i].cores_per_package << 1) | 1;
-                p->cache.subleaf[i].threads_per_cache = 0;
+                cur->policy.cache.subleaf[i].cores_per_package =
+                    (cur->policy.cache.subleaf[i].cores_per_package << 1) | 1;
+                cur->policy.cache.subleaf[i].threads_per_cache = 0;
             }
             break;
 
@@ -836,30 +810,22 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
              * apic_id_size values greater than 7. Limit the value to
             * 7 for now.
             */
-            if ( p->extd.nc < 0x7f )
+            if ( cur->policy.extd.nc < 0x7f )
            {
-                if ( p->extd.apic_id_size != 0 && p->extd.apic_id_size < 0x7 )
-                    p->extd.apic_id_size++;
+                if ( cur->policy.extd.apic_id_size != 0 &&
+                     cur->policy.extd.apic_id_size < 0x7 )
+                    cur->policy.extd.apic_id_size++;
 
-                p->extd.nc = (p->extd.nc << 1) | 1;
+                cur->policy.extd.nc = (cur->policy.extd.nc << 1) | 1;
            }
            break;
        }
    }
 
-    rc = x86_cpuid_copy_to_buffer(p, leaves, &nr_leaves);
-    if ( rc )
-    {
-        ERROR("Failed to serialise CPUID (%d = %s)", -rc, strerror(-rc));
-        goto out;
-    }
-
-    rc = xc_set_domain_cpu_policy(xch, domid, nr_leaves, leaves, 0, NULL,
-                                  &err_leaf, &err_subleaf, &err_msr);
+    /* Re-apply the mutated policy onto the domain */
+    rc = xc_cpu_policy_set_domain(xch, domid, cur);
     if ( rc )
     {
-        PERROR("Failed to set d%d's policy (err leaf %#x, subleaf %#x, msr %#x)",
-               domid, err_leaf, err_subleaf, err_msr);
         rc = -errno;
         goto out;
     }
@@ -877,8 +843,9 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
     rc = 0;
 
 out:
-    free(p);
-    free(leaves);
+    xc_cpu_policy_destroy(cur);
+    xc_cpu_policy_destroy(def);
+    xc_cpu_policy_destroy(host);
     return rc;
 }
 
@@ -911,6 +878,39 @@ xc_cpu_policy_t *xc_cpu_policy_init(xc_interface *xch)
     return NULL;
 }
 
+xc_cpu_policy_t *xc_cpu_policy_clone(const xc_cpu_policy_t *other)
+{
+    xc_cpu_policy_t *policy;
+
+    if ( !other )
+        return NULL;
+
+    policy = malloc(sizeof(*policy));
+    if ( !policy )
+        return NULL;
+
+    *policy = *other;
+
+    /* Override every buffer with identical new ones */
+    policy->leaves.buf = calloc(other->leaves.allocated,
+                                sizeof(*other->leaves.buf));
+    policy->msrs.buf = calloc(other->msrs.allocated,
+                              sizeof(*other->msrs.buf));
+
+    if ( !policy->leaves.buf || !policy->msrs.buf )
+    {
+        xc_cpu_policy_destroy(policy);
+        return NULL;
+    }
+
+    memcpy(policy->leaves.buf, other->leaves.buf,
+           other->leaves.allocated);
+    memcpy(policy->msrs.buf, other->msrs.buf,
+           other->msrs.allocated);
+
+    return policy;
+}
+
 void xc_cpu_policy_destroy(xc_cpu_policy_t *policy)
 {
     int err = errno;
-- 
2.34.1
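
[Editor's note] For anyone who has not used the xc_cpu_policy_* helpers before, the lifecycle the streamlined function now relies on is roughly the sketch below. It is illustrative only and not part of the patch: the wrapper function name, the open xc_interface handle and the domid are assumed for the example, and the calls simply mirror the init/clone/get/set/destroy sequence visible in the diff above.

/*
 * Illustrative sketch only -- not part of the patch.  Assumes `xch` is an
 * open xc_interface handle and `domid` an existing domain; error reporting
 * is trimmed.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <xenctrl.h>
#include <xenguest.h>

static int example_fetch_and_set(xc_interface *xch, uint32_t domid, bool hvm)
{
    int rc = -ENOMEM;

    /* One allocation plus two clones, as in xc_cpuid_apply_policy(). */
    xc_cpu_policy_t *host = xc_cpu_policy_init(xch);
    xc_cpu_policy_t *def  = xc_cpu_policy_clone(host);
    xc_cpu_policy_t *cur  = xc_cpu_policy_clone(host);

    if ( !host || !def || !cur )
        goto out;

    /* Fill each object from the hypervisor. */
    if ( (rc = xc_cpu_policy_get_system(xch, XEN_SYSCTL_cpu_policy_host,
                                        host)) ||
         (rc = xc_cpu_policy_get_system(xch,
                                        hvm ? XEN_SYSCTL_cpu_policy_hvm_default
                                            : XEN_SYSCTL_cpu_policy_pv_default,
                                        def)) ||
         (rc = xc_cpu_policy_get_domain(xch, domid, cur)) )
        goto out;

    /* ... adjust `cur` here (xc_cpuid_apply_policy() edits cur->policy) ... */

    /* Push the mutated policy back onto the domain. */
    rc = xc_cpu_policy_set_domain(xch, domid, cur);

 out:
    xc_cpu_policy_destroy(cur);
    xc_cpu_policy_destroy(def);
    xc_cpu_policy_destroy(host);
    return rc;
}

Note that xc_cpu_policy_clone() returns NULL when handed NULL, and the out: path destroys whatever was actually allocated, which is what lets both the patch and this sketch funnel every failure through a single exit label.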