[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [PATCH v2 1/8] x86/pdx: simplify calculation of domain struct allocation boundary
When not using CONFIG_BIGMEM there are some restrictions in the address width for allocations of the domain structure, as the PDX, truncated to 32 bits, is stashed into the page_info structure for domain allocated pages. The current logic to calculate this limit is based on the internals of the PDX compression used, which is not strictly required. Instead simplify the logic to rely on the existing PDX to PFN conversion helpers used elsewhere. This has the added benefit of allowing alternative PDX compression algorithms to be implemented without requiring changes to the calculation of the domain structure allocation boundary. As a side effect, introduce a pdx_to_paddr() conversion macro and use it. Signed-off-by: Roger Pau Monné <roger.pau@xxxxxxxxxx> --- Changes since v1: - Use sizeof_field(). - Introduce and use pdx_to_paddr(). - Add comment. --- xen/arch/x86/domain.c | 40 +++++++++++----------------------------- xen/include/xen/pdx.h | 1 + 2 files changed, 12 insertions(+), 29 deletions(-) diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index d025befe3d8e..14a0f6dda791 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -461,30 +461,6 @@ void domain_cpu_policy_changed(struct domain *d) } } -#if !defined(CONFIG_BIGMEM) && defined(CONFIG_PDX_COMPRESSION) -/* - * The hole may be at or above the 44-bit boundary, so we need to determine - * the total bit count until reaching 32 significant (not squashed out) bits - * in PFN representations. - * Note that the way "bits" gets initialized/updated/bounds-checked guarantees - * that the function will never return zero, and hence will never be called - * more than once (which is important due to it being deliberately placed in - * .init.text). 
- */ -static unsigned int __init noinline _domain_struct_bits(void) -{ - unsigned int bits = 32 + PAGE_SHIFT; - unsigned int sig = hweight32(~pfn_hole_mask); - unsigned int mask = pfn_hole_mask >> 32; - - for ( ; bits < BITS_PER_LONG && sig < 32; ++bits, mask >>= 1 ) - if ( !(mask & 1) ) - ++sig; - - return bits; -} -#endif - struct domain *alloc_domain_struct(void) { struct domain *d; @@ -498,14 +474,20 @@ struct domain *alloc_domain_struct(void) * On systems with CONFIG_BIGMEM there's no packing, and so there's no * such restriction. */ -#if defined(CONFIG_BIGMEM) || !defined(CONFIG_PDX_COMPRESSION) - const unsigned int bits = IS_ENABLED(CONFIG_BIGMEM) ? 0 : - 32 + PAGE_SHIFT; +#if defined(CONFIG_BIGMEM) + const unsigned int bits = 0; #else - static unsigned int __read_mostly bits; + static unsigned int __ro_after_init bits; if ( unlikely(!bits) ) - bits = _domain_struct_bits(); + /* + * Get the width for the next pfn, and unconditionally subtract one + * from it to ensure the used width will not allocate past the PDX + * field limit. + */ + bits = flsl(pdx_to_paddr(1UL << (sizeof_field(struct page_info, + v.inuse._domain) * 8))) + - 1; #endif BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE); diff --git a/xen/include/xen/pdx.h b/xen/include/xen/pdx.h index 9faeea3ac9f2..c1423d64a95b 100644 --- a/xen/include/xen/pdx.h +++ b/xen/include/xen/pdx.h @@ -99,6 +99,7 @@ bool __mfn_valid(unsigned long mfn); #define pdx_to_mfn(pdx) _mfn(pdx_to_pfn(pdx)) #define paddr_to_pdx(pa) pfn_to_pdx(paddr_to_pfn(pa)) +#define pdx_to_paddr(px) pfn_to_paddr(pdx_to_pfn(px)) #ifdef CONFIG_PDX_COMPRESSION -- 2.49.0
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |