[xen stable-4.15] libx86: Update library API for cpu_policy
commit c68a43c3a223c180295261826530a78920adc088
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Mon Apr 3 14:18:43 2023 +0100
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Aug 3 19:13:40 2023 +0100

    libx86: Update library API for cpu_policy

    Adjust the API and comments appropriately.

    x86_cpu_policy_fill_native() will eventually contain MSR reads, but
    leave a TODO in the short term.

    No practical change.

    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
    (cherry picked from commit 1b67fccf3b02825f6a036bad06cd17963d0972d2)

    tools/libs/guest: Fix build following libx86 changes

    I appear to have lost this hunk somewhere...

    Fixes: 1b67fccf3b02 ("libx86: Update library API for cpu_policy")
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
    (cherry picked from commit 48d76e6da92f9ef76c8468e299349a2f698362fa)
---
 tools/fuzz/cpu-policy/afl-policy-fuzzer.c |  4 +--
 tools/libs/guest/xg_cpuid_x86.c           |  2 +-
 tools/tests/cpu-policy/test-cpu-policy.c  |  4 +--
 tools/tests/x86_emulator/x86-emulate.c    |  2 +-
 xen/arch/x86/cpu-policy.c                 |  4 +--
 xen/arch/x86/cpu/common.c                 |  2 +-
 xen/arch/x86/domctl.c                     |  2 +-
 xen/arch/x86/xstate.c                     |  4 +--
 xen/include/xen/lib/x86/cpu-policy.h      | 49 ++++++++++++++++++-------------
 xen/lib/x86/cpuid.c                       | 26 ++++++++--------
 xen/lib/x86/msr.c                         |  4 +--
 11 files changed, 56 insertions(+), 47 deletions(-)

diff --git a/tools/fuzz/cpu-policy/afl-policy-fuzzer.c b/tools/fuzz/cpu-policy/afl-policy-fuzzer.c
index 585324e417..11df2f7802 100644
--- a/tools/fuzz/cpu-policy/afl-policy-fuzzer.c
+++ b/tools/fuzz/cpu-policy/afl-policy-fuzzer.c
@@ -48,8 +48,8 @@ static void check_policy(struct cpu_policy *cp)
      * Fix up the data in the source policy which isn't expected to survive
      * serialisation.
      */
-    x86_cpuid_policy_clear_out_of_range_leaves(cp);
-    x86_cpuid_policy_recalc_synth(cp);
+    x86_cpu_policy_clear_out_of_range_leaves(cp);
+    x86_cpu_policy_recalc_synth(cp);
 
     /* Serialise... */
     rc = x86_cpuid_copy_to_buffer(cp, leaves, &nr_leaves);
diff --git a/tools/libs/guest/xg_cpuid_x86.c b/tools/libs/guest/xg_cpuid_x86.c
index 53922922b1..edfe2fe41f 100644
--- a/tools/libs/guest/xg_cpuid_x86.c
+++ b/tools/libs/guest/xg_cpuid_x86.c
@@ -558,7 +558,7 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
             const uint32_t *dfs;
 
             if ( !test_bit(b, disabled_features) ||
-                 !(dfs = x86_cpuid_lookup_deep_deps(b)) )
+                 !(dfs = x86_cpu_policy_lookup_deep_deps(b)) )
                 continue;
 
             for ( i = 0; i < ARRAY_SIZE(disabled_features); ++i )
diff --git a/tools/tests/cpu-policy/test-cpu-policy.c b/tools/tests/cpu-policy/test-cpu-policy.c
index a7c19be63f..88d35c6c8e 100644
--- a/tools/tests/cpu-policy/test-cpu-policy.c
+++ b/tools/tests/cpu-policy/test-cpu-policy.c
@@ -95,7 +95,7 @@ static void test_cpuid_current(void)
 
     printf("Testing CPUID on current CPU\n");
 
-    x86_cpuid_policy_fill_native(&p);
+    x86_cpu_policy_fill_native(&p);
 
     rc = x86_cpuid_copy_to_buffer(&p, leaves, &nr);
     if ( rc != 0 )
@@ -537,7 +537,7 @@ static void test_cpuid_out_of_range_clearing(void)
         void *ptr;
         unsigned int nr_markers;
 
-        x86_cpuid_policy_clear_out_of_range_leaves(p);
+        x86_cpu_policy_clear_out_of_range_leaves(p);
 
         /* Count the number of 0xc2's still remaining. */
         for ( ptr = p, nr_markers = 0;
diff --git a/tools/tests/x86_emulator/x86-emulate.c b/tools/tests/x86_emulator/x86-emulate.c
index 3172f9e7ac..babdb59d1b 100644
--- a/tools/tests/x86_emulator/x86-emulate.c
+++ b/tools/tests/x86_emulator/x86-emulate.c
@@ -85,7 +85,7 @@ bool emul_test_init(void)
 
     unsigned long sp;
 
-    x86_cpuid_policy_fill_native(&cp);
+    x86_cpu_policy_fill_native(&cp);
 
     /*
      * The emulator doesn't use these instructions, so can always emulate
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index 73195ec7d9..b9cfc2a29f 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -176,7 +176,7 @@ static void sanitise_featureset(uint32_t *fs)
     for_each_set_bit(i, (void *)disabled_features,
                      sizeof(disabled_features) * 8)
     {
-        const uint32_t *dfs = x86_cpuid_lookup_deep_deps(i);
+        const uint32_t *dfs = x86_cpu_policy_lookup_deep_deps(i);
         unsigned int j;
 
         ASSERT(dfs); /* deep_features[] should guarentee this. */
@@ -347,7 +347,7 @@ static void __init calculate_raw_policy(void)
 {
     struct cpu_policy *p = &raw_cpu_policy;
 
-    x86_cpuid_policy_fill_native(p);
+    x86_cpu_policy_fill_native(p);
 
     /* Nothing good will come from Xen and libx86 disagreeing on vendor. */
     ASSERT(p->x86_vendor == boot_cpu_data.x86_vendor);
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 8b5e32c78b..41ad80756a 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -72,7 +72,7 @@ void __init setup_clear_cpu_cap(unsigned int cap)
                        __builtin_return_address(0), cap);
 
         __clear_bit(cap, boot_cpu_data.x86_capability);
-        dfs = x86_cpuid_lookup_deep_deps(cap);
+        dfs = x86_cpu_policy_lookup_deep_deps(cap);
 
         if (!dfs)
                 return;
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index 0ace9cc7c2..f17fc24926 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -76,7 +76,7 @@ static int update_domain_cpu_policy(struct domain *d,
         goto out;
 
     /* Trim any newly-stale out-of-range leaves. */
-    x86_cpuid_policy_clear_out_of_range_leaves(new);
+    x86_cpu_policy_clear_out_of_range_leaves(new);
 
     /* Audit the combined dataset. */
     ret = x86_cpu_policies_are_compatible(sys, new, &err);
diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 3bcc523f80..7d935354ae 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -677,7 +677,7 @@ static bool valid_xcr0(u64 xcr0)
 int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
                     const struct xsave_hdr *hdr)
 {
-    uint64_t xcr0_max = cpuid_policy_xcr0_max(d->arch.cpuid);
+    uint64_t xcr0_max = cpu_policy_xcr0_max(d->arch.cpuid);
     unsigned int i;
 
     if ( (hdr->xstate_bv & ~xcr0_accum) ||
@@ -701,7 +701,7 @@ int validate_xstate(const struct domain *d, uint64_t xcr0, uint64_t xcr0_accum,
 int handle_xsetbv(u32 index, u64 new_bv)
 {
     struct vcpu *curr = current;
-    uint64_t xcr0_max = cpuid_policy_xcr0_max(curr->domain->arch.cpuid);
+    uint64_t xcr0_max = cpu_policy_xcr0_max(curr->domain->arch.cpuid);
     u64 mask;
 
     if ( index != XCR_XFEATURE_ENABLED_MASK )
diff --git a/xen/include/xen/lib/x86/cpu-policy.h b/xen/include/xen/lib/x86/cpu-policy.h
index 57b4633c86..cf7de0f29c 100644
--- a/xen/include/xen/lib/x86/cpu-policy.h
+++ b/xen/include/xen/lib/x86/cpu-policy.h
@@ -399,33 +399,38 @@ void x86_cpu_policy_to_featureset(const struct cpu_policy *p,
 void x86_cpu_featureset_to_policy(const uint32_t fs[FEATURESET_NR_ENTRIES],
                                   struct cpu_policy *p);
 
-static inline uint64_t cpuid_policy_xcr0_max(const struct cpuid_policy *p)
+static inline uint64_t cpu_policy_xcr0_max(const struct cpu_policy *p)
 {
     return ((uint64_t)p->xstate.xcr0_high << 32) | p->xstate.xcr0_low;
 }
 
-static inline uint64_t cpuid_policy_xstates(const struct cpuid_policy *p)
+static inline uint64_t cpu_policy_xstates(const struct cpu_policy *p)
 {
     uint64_t val = p->xstate.xcr0_high | p->xstate.xss_high;
 
     return (val << 32) | p->xstate.xcr0_low | p->xstate.xss_low;
 }
 
-const uint32_t *x86_cpuid_lookup_deep_deps(uint32_t feature);
+/**
+ * For a specific feature, look up the dependent features.  Returns NULL if
+ * this feature has no dependencies.  Otherwise return a featureset of
+ * dependent features, which has been recursively flattened.
+ */
+const uint32_t *x86_cpu_policy_lookup_deep_deps(uint32_t feature);
 
 /**
- * Recalculate the content in a CPUID policy which is derived from raw data.
+ * Recalculate the content in a CPU policy which is derived from raw data.
  */
-void x86_cpuid_policy_recalc_synth(struct cpuid_policy *p);
+void x86_cpu_policy_recalc_synth(struct cpu_policy *p);
 
 /**
- * Fill a CPUID policy using the native CPUID instruction.
+ * Fill CPU policy using the native CPUID/RDMSR instruction.
  *
  * No sanitisation is performed, but synthesised values are calculated.
  * Values may be influenced by a hypervisor or from masking/faulting
  * configuration.
  */
-void x86_cpuid_policy_fill_native(struct cpuid_policy *p);
+void x86_cpu_policy_fill_native(struct cpu_policy *p);
 
 /**
  * Clear leaf data beyond the policies max leaf/subleaf settings.
@@ -436,7 +441,7 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p);
  * with out-of-range leaves with stale content in them.  This helper clears
  * them.
  */
-void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p);
+void x86_cpu_policy_clear_out_of_range_leaves(struct cpu_policy *p);
 
 #ifdef __XEN__
 #include <public/arch-x86/xen.h>
@@ -449,9 +454,10 @@ typedef xen_msr_entry_t msr_entry_buffer_t[];
 #endif
 
 /**
- * Serialise a cpuid_policy object into an array of cpuid leaves.
+ * Serialise the CPUID leaves of a cpu_policy object into an array of cpuid
+ * leaves.
  *
- * @param policy     The cpuid_policy to serialise.
+ * @param policy     The cpu_policy to serialise.
  * @param leaves     The array of leaves to serialise into.
 * @param nr_entries The number of entries in 'leaves'.
 * @returns -errno
@@ -460,13 +466,14 @@ typedef xen_msr_entry_t msr_entry_buffer_t[];
  * leaves array is too short.  On success, nr_entries is updated with the
  * actual number of leaves written.
  */
-int x86_cpuid_copy_to_buffer(const struct cpuid_policy *policy,
+int x86_cpuid_copy_to_buffer(const struct cpu_policy *policy,
                              cpuid_leaf_buffer_t leaves, uint32_t *nr_entries);
 
 /**
- * Unserialise a cpuid_policy object from an array of cpuid leaves.
+ * Unserialise the CPUID leaves of a cpu_policy object into an array of cpuid
+ * leaves.
  *
- * @param policy      The cpuid_policy to unserialise into.
+ * @param policy      The cpu_policy to unserialise into.
  * @param leaves      The array of leaves to unserialise from.
  * @param nr_entries  The number of entries in 'leaves'.
  * @param err_leaf    Optional hint for error diagnostics.
@@ -474,21 +481,21 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *policy,
  * @returns -errno
  *
  * Reads at most CPUID_MAX_SERIALISED_LEAVES.  May return -ERANGE if an
- * incoming leaf is out of range of cpuid_policy, in which case the optional
+ * incoming leaf is out of range of cpu_policy, in which case the optional
  * err_* pointers will identify the out-of-range indicies.
  *
  * No content validation of in-range leaves is performed.  Synthesised data is
  * recalculated.
  */
-int x86_cpuid_copy_from_buffer(struct cpuid_policy *policy,
+int x86_cpuid_copy_from_buffer(struct cpu_policy *policy,
                                const cpuid_leaf_buffer_t leaves,
                                uint32_t nr_entries, uint32_t *err_leaf,
                                uint32_t *err_subleaf);
 
 /**
- * Serialise an msr_policy object into an array.
+ * Serialise the MSRs of a cpu_policy object into an array.
  *
- * @param policy     The msr_policy to serialise.
+ * @param policy     The cpu_policy to serialise.
  * @param msrs       The array of msrs to serialise into.
  * @param nr_entries The number of entries in 'msrs'.
  * @returns -errno
@@ -497,13 +504,13 @@ int x86_cpuid_copy_from_buffer(struct cpuid_policy *policy,
  * buffer array is too short.  On success, nr_entries is updated with the
  * actual number of msrs written.
  */
-int x86_msr_copy_to_buffer(const struct msr_policy *policy,
+int x86_msr_copy_to_buffer(const struct cpu_policy *policy,
                            msr_entry_buffer_t msrs, uint32_t *nr_entries);
 
 /**
- * Unserialise an msr_policy object from an array of msrs.
+ * Unserialise the MSRs of a cpu_policy object from an array of msrs.
  *
- * @param policy     The msr_policy object to unserialise into.
+ * @param policy     The cpu_policy object to unserialise into.
  * @param msrs       The array of msrs to unserialise from.
  * @param nr_entries The number of entries in 'msrs'.
  * @param err_msr    Optional hint for error diagnostics.
@@ -517,7 +524,7 @@ int x86_msr_copy_to_buffer(const struct msr_policy *policy,
  *
  * No content validation is performed on the data stored in the policy object.
  */
-int x86_msr_copy_from_buffer(struct msr_policy *policy,
+int x86_msr_copy_from_buffer(struct cpu_policy *policy,
                              const msr_entry_buffer_t msrs, uint32_t nr_entries,
                              uint32_t *err_msr);
 
diff --git a/xen/lib/x86/cpuid.c b/xen/lib/x86/cpuid.c
index 734e90823a..68aafb4049 100644
--- a/xen/lib/x86/cpuid.c
+++ b/xen/lib/x86/cpuid.c
@@ -102,13 +102,13 @@ void x86_cpu_featureset_to_policy(
     p->feat._7d1 = fs[FEATURESET_7d1];
 }
 
-void x86_cpuid_policy_recalc_synth(struct cpuid_policy *p)
+void x86_cpu_policy_recalc_synth(struct cpu_policy *p)
 {
     p->x86_vendor = x86_cpuid_lookup_vendor(
         p->basic.vendor_ebx, p->basic.vendor_ecx, p->basic.vendor_edx);
 }
 
-void x86_cpuid_policy_fill_native(struct cpuid_policy *p)
+void x86_cpu_policy_fill_native(struct cpu_policy *p)
 {
     unsigned int i;
 
@@ -199,7 +199,7 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p)
     cpuid_count_leaf(0xd, 0, &p->xstate.raw[0]);
     cpuid_count_leaf(0xd, 1, &p->xstate.raw[1]);
 
-    xstates = cpuid_policy_xstates(p);
+    xstates = cpu_policy_xstates(p);
 
     /* This logic will probably need adjusting when XCR0[63] gets used. */
     BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);
@@ -222,10 +222,12 @@ void x86_cpuid_policy_fill_native(struct cpuid_policy *p)
     p->hv_limit = 0;
     p->hv2_limit = 0;
 
-    x86_cpuid_policy_recalc_synth(p);
+    /* TODO MSRs */
+
+    x86_cpu_policy_recalc_synth(p);
 }
 
-void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p)
+void x86_cpu_policy_clear_out_of_range_leaves(struct cpu_policy *p)
 {
     unsigned int i;
 
@@ -260,7 +262,7 @@ void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p)
         zero_leaves(p->topo.raw, i, ARRAY_SIZE(p->topo.raw) - 1);
     }
 
-    if ( p->basic.max_leaf < 0xd || !cpuid_policy_xstates(p) )
+    if ( p->basic.max_leaf < 0xd || !cpu_policy_xstates(p) )
         memset(p->xstate.raw, 0, sizeof(p->xstate.raw));
     else
     {
@@ -268,7 +270,7 @@ void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p)
         BUILD_BUG_ON(ARRAY_SIZE(p->xstate.raw) > 63);
 
         /* First two leaves always valid.  Rest depend on xstates. */
-        i = max(2, 64 - __builtin_clzll(cpuid_policy_xstates(p)));
+        i = max(2, 64 - __builtin_clzll(cpu_policy_xstates(p)));
 
         zero_leaves(p->xstate.raw, i,
                     ARRAY_SIZE(p->xstate.raw) - 1);
@@ -278,7 +280,7 @@ void x86_cpuid_policy_clear_out_of_range_leaves(struct cpuid_policy *p)
                 ARRAY_SIZE(p->extd.raw) - 1);
 }
 
-const uint32_t *x86_cpuid_lookup_deep_deps(uint32_t feature)
+const uint32_t *x86_cpu_policy_lookup_deep_deps(uint32_t feature)
 {
     static const uint32_t deep_features[] = INIT_DEEP_FEATURES;
     static const struct {
@@ -333,7 +335,7 @@ static int copy_leaf_to_buffer(uint32_t leaf, uint32_t subleaf,
     return 0;
 }
 
-int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p,
+int x86_cpuid_copy_to_buffer(const struct cpu_policy *p,
                              cpuid_leaf_buffer_t leaves, uint32_t *nr_entries_p)
 {
     const uint32_t nr_entries = *nr_entries_p;
@@ -383,7 +385,7 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p,
 
     case 0xd:
     {
-        uint64_t xstates = cpuid_policy_xstates(p);
+        uint64_t xstates = cpu_policy_xstates(p);
 
         COPY_LEAF(leaf, 0, &p->xstate.raw[0]);
         COPY_LEAF(leaf, 1, &p->xstate.raw[1]);
@@ -419,7 +421,7 @@ int x86_cpuid_copy_to_buffer(const struct cpuid_policy *p,
     return 0;
 }
 
-int x86_cpuid_copy_from_buffer(struct cpuid_policy *p,
+int x86_cpuid_copy_from_buffer(struct cpu_policy *p,
                                const cpuid_leaf_buffer_t leaves,
                                uint32_t nr_entries, uint32_t *err_leaf,
                                uint32_t *err_subleaf)
@@ -522,7 +524,7 @@ int x86_cpuid_copy_from_buffer(struct cpuid_policy *p,
         }
     }
 
-    x86_cpuid_policy_recalc_synth(p);
+    x86_cpu_policy_recalc_synth(p);
 
     return 0;
 
diff --git a/xen/lib/x86/msr.c b/xen/lib/x86/msr.c
index c4d885e7b5..e04b9ca013 100644
--- a/xen/lib/x86/msr.c
+++ b/xen/lib/x86/msr.c
@@ -23,7 +23,7 @@ static int copy_msr_to_buffer(uint32_t idx, uint64_t val,
     return 0;
 }
 
-int x86_msr_copy_to_buffer(const struct msr_policy *p,
+int x86_msr_copy_to_buffer(const struct cpu_policy *p,
                            msr_entry_buffer_t msrs, uint32_t *nr_entries_p)
 {
     const uint32_t nr_entries = *nr_entries_p;
@@ -48,7 +48,7 @@ int x86_msr_copy_to_buffer(const struct msr_policy *p,
     return 0;
 }
 
-int x86_msr_copy_from_buffer(struct msr_policy *p,
+int x86_msr_copy_from_buffer(struct cpu_policy *p,
                              const msr_entry_buffer_t msrs, uint32_t nr_entries,
                              uint32_t *err_msr)
 {
--
generated by git-patchbot for /home/xen/git/xen.git#stable-4.15