|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] Re: [Xen-devel] [PATCH v3 08/10] tools/libxc: Rework xc_cpuid_apply_policy() to use {get, set}_cpu_policy()
On 26/09/2019 09:04, Jan Beulich wrote:
> On 25.09.2019 20:11, Andrew Cooper wrote:
>> +int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid,
>> + const uint32_t *featureset, unsigned int
>> nr_features)
>> +{
>> + int rc;
>> + xc_dominfo_t di;
>> + unsigned int i, nr_leaves, nr_msrs;
>> + xen_cpuid_leaf_t *leaves = NULL;
>> + struct cpuid_policy *p = NULL;
>> + uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
>> +
>> + if ( xc_domain_getinfo(xch, domid, 1, &di) != 1 ||
>> + di.domid != domid )
>> + {
>> + ERROR("Failed to obtain d%d info", domid);
>> + rc = -ESRCH;
>> + goto out;
>> + }
>> +
>> + rc = xc_get_cpu_policy_size(xch, &nr_leaves, &nr_msrs);
>> + if ( rc )
>> + {
>> + PERROR("Failed to obtain policy info size");
>> + rc = -errno;
>> + goto out;
>> + }
>> +
>> + rc = -ENOMEM;
>> + if ( (leaves = calloc(nr_leaves, sizeof(*leaves))) == NULL ||
>> + (p = calloc(1, sizeof(*p))) == NULL )
>> + goto out;
>> +
>> + /* Get the domain's default policy. */
>> + nr_msrs = 0;
>> + rc = xc_get_system_cpu_policy(xch, di.hvm ?
>> XEN_SYSCTL_cpu_policy_hvm_default
>> + :
>> XEN_SYSCTL_cpu_policy_pv_default,
>> + &nr_leaves, leaves, &nr_msrs, NULL);
>> + if ( rc )
>> + {
>> + PERROR("Failed to obtain %s default policy", di.hvm ? "hvm" : "pv");
>> + rc = -errno;
>> + goto out;
>> + }
>> +
>> + rc = x86_cpuid_copy_from_buffer(p, leaves, nr_leaves,
>> + &err_leaf, &err_subleaf);
>> + if ( rc )
>> + {
>> + ERROR("Failed to deserialise CPUID (err leaf %#x, subleaf %#x) (%d
>> = %s)",
>> + err_leaf, err_subleaf, -rc, strerror(-rc));
>> + goto out;
>> + }
>> +
>> + if ( featureset )
>> + {
>> + uint32_t disabled_features[FEATURESET_NR_ENTRIES],
>> + feat[FEATURESET_NR_ENTRIES] = {};
>> + static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
>> + unsigned int i, b;
>> +
>> + /*
>> + * The user supplied featureset may be shorter or longer than
>> + * FEATURESET_NR_ENTRIES. Shorter is fine, and we will zero-extend.
>> + * Longer is fine, so long as it is only padded with zeros.
>> + */
>> + unsigned int user_len = min(FEATURESET_NR_ENTRIES + 0u,
>> nr_features);
>> +
>> + /* Check for truncated set bits. */
>> + rc = -EOPNOTSUPP;
>> + for ( i = user_len; i < nr_features; ++i )
>> + if ( featureset[i] != 0 )
>> + goto out;
>> +
>> + memcpy(feat, featureset, sizeof(*featureset) * user_len);
>> +
>> + /* Disable deep dependencies of disabled features. */
>> + for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
>> + disabled_features[i] = ~feat[i] & deep_features[i];
>> +
>> + for ( b = 0; b < sizeof(disabled_features) * CHAR_BIT; ++b )
>> + {
>> + const uint32_t *dfs;
>> +
>> + if ( !test_bit(b, disabled_features) ||
>> + !(dfs = x86_cpuid_lookup_deep_deps(b)) )
>> + continue;
>> +
>> + for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
>> + {
>> + feat[i] &= ~dfs[i];
>> + disabled_features[i] &= ~dfs[i];
>> + }
>> + }
>> +
>> + cpuid_featureset_to_policy(feat, p);
>> + }
>> +
>> + if ( !di.hvm )
>> + {
>> + uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
>> + uint32_t len = ARRAY_SIZE(host_featureset);
>> +
>> + rc = xc_get_cpu_featureset(xch, XEN_SYSCTL_cpu_featureset_host,
>> + &len, host_featureset);
>> + if ( rc )
>> + {
>> + /* Tolerate "buffer too small", as we've got the bits we need.
>> */
>> + if ( errno == ENOBUFS )
>> + rc = 0;
>> + else
>> + {
>> + PERROR("Failed to obtain host featureset");
>> + rc = -errno;
>> + goto out;
>> + }
>> + }
>> +
>> + /*
>> + * On hardware without CPUID Faulting, PV guests see real topology.
>> + * As a consequence, they also need to see the host htt/cmp fields.
>> + */
>> + p->basic.htt = test_bit(X86_FEATURE_HTT, host_featureset);
>> + p->extd.cmp_legacy = test_bit(X86_FEATURE_CMP_LEGACY,
>> host_featureset);
>> + }
>> + else
>> + {
>> + /*
>> + * Topology for HVM guests is entirely controlled by Xen. For now,
>> we
>> + * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
>> + */
>> + p->basic.htt = true;
>> + p->extd.cmp_legacy = false;
>> +
>> + /*
>> + * EBX[23:16] is Maximum Logical Processors Per Package.
>> + * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
>> + * overflow.
>> + */
>> + if ( !(p->basic.lppp & 0x80) )
>> + p->basic.lppp *= 2;
> I think you want to start the comment with "Leaf 1 EBX[23:16] ...",
> as p->basic covers all basic leaves.
>
> Additionally, while using masking instead of a relational operator
> is correct here, ...
>
>> + switch ( p->x86_vendor )
>> + {
>> + case X86_VENDOR_INTEL:
>> + for ( i = 0; (p->cache.subleaf[i].type &&
>> + i < ARRAY_SIZE(p->cache.raw)); ++i )
>> + {
>> + p->cache.subleaf[i].cores_per_package =
>> + (p->cache.subleaf[i].cores_per_package << 1) | 1;
>> + p->cache.subleaf[i].threads_per_cache = 0;
>> + }
>> + break;
>> +
>> + case X86_VENDOR_AMD:
>> + case X86_VENDOR_HYGON:
>> + /*
>> + * ECX[15:12] is ApicIdCoreSize.
>> + * ECX[7:0] is NumberOfCores (minus one).
>> + * Update to reflect vLAPIC_ID = vCPU_ID * 2. But avoid
>> + * - overflow,
>> + * - going out of sync with leaf 1 EBX[23:16],
>> + * - incrementing ApicIdCoreSize when it's zero (which changes
>> the
>> + * meaning of bits 7:0).
>> + */
>> + if ( !(p->extd.nc & 0x80) )
> ... it isn't here, i.e. this isn't a correct transformation of the
> recent change for Rome): If the value is 0x7f here, the value in
> leaf 1 would be 0x80. An adjustment, however, needs to be done
> either to both leaves, or to none of them, to keep the values in
> sufficient sync (and I think you'd break Rome again otherwise, as
> p->extd.nc _is_ 0x7f there). Hence the "(regs[2] & 0xffu) < 0x7fu"
> check in my recent patch.
Urgh yes - I questioned this when doing the rebase a second time. I'll
revert to the logic you had.
>
> Like above I think you want to name the (extended) leaf in the
> comment, as p->extd similarly covers all extended leaves.
Done.
~Andrew
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel
|
Lists.xenproject.org is hosted with RackSpace, monitoring our systems 24x7.